mirror of
https://github.com/fluencelabs/tendermint
synced 2025-06-05 09:31:20 +00:00
Merge branch 'master' into anton/block-results
This commit is contained in:
commit
d2db47cc4a
@ -331,6 +331,34 @@ jobs:
|
||||
docker push "tendermint/tendermint"
|
||||
docker logout
|
||||
|
||||
reproducible_builds:
|
||||
<<: *defaults
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
docker_layer_caching: true
|
||||
- run:
|
||||
name: Build tendermint
|
||||
no_output_timeout: 20m
|
||||
command: |
|
||||
sudo apt-get install -y ruby
|
||||
bash -x ./scripts/gitian-build.sh all
|
||||
for os in darwin linux windows; do
|
||||
cp gitian-build-${os}/result/tendermint-${os}-res.yml .
|
||||
cp gitian-build-${os}/build/out/tendermint-*.tar.gz .
|
||||
rm -rf gitian-build-${os}/
|
||||
done
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
test-suite:
|
||||
@ -340,7 +368,6 @@ workflows:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- develop
|
||||
- setup_dependencies
|
||||
- test_abci_apps:
|
||||
requires:
|
||||
@ -364,6 +391,12 @@ workflows:
|
||||
- upload_coverage:
|
||||
requires:
|
||||
- test_cover
|
||||
- reproducible_builds:
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /v[0-9]+\.[0-9]+/
|
||||
release:
|
||||
jobs:
|
||||
- prepare_build
|
||||
|
74
CHANGELOG.md
74
CHANGELOG.md
@ -1,5 +1,49 @@
|
||||
# Changelog
|
||||
|
||||
## v0.32.1
|
||||
|
||||
*July 15, 2019*
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@ParthDesai, @climber73, @jim380, @ashleyvega
|
||||
|
||||
This release contains a minor enhancement to the ABCI and some breaking changes to our libs folder, namely:
|
||||
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
|
||||
- Removed various functions from `libs` pkgs
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
|
||||
- [abci] [\#2127](https://github.com/tendermint/tendermint/issues/2127) The CheckTx and DeliverTx methods in the ABCI `Application` interface now take structs as arguments (RequestCheckTx and RequestDeliverTx, respectively), instead of just the raw tx bytes. This allows more information to be passed to these methods, for instance, indicating whether a tx has already been checked.
|
||||
- [libs] Remove unused `db/debugDB` and `common/colors.go` & `errors/errors.go` files (@marbar3778)
|
||||
- [libs] [\#2432](https://github.com/tendermint/tendermint/issues/2432) Remove unused `common/heap.go` file (@marbar3778)
|
||||
- [libs] Remove unused `date.go`, `io.go`. Remove `GoPath()`, `Prompt()` and `IsDirEmpty()` functions from `os.go` (@marbar3778)
|
||||
- [libs] Remove unused `FailRand()` func and minor clean up to `fail.go`(@marbar3778)
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [node] Add variadic argument to `NewNode` to support functional options, allowing the Node to be more easily customized.
|
||||
- [node][\#3730](https://github.com/tendermint/tendermint/pull/3730) Add `CustomReactors` option to `NewNode` allowing caller to pass
|
||||
custom reactors to run inside Tendermint node (@ParthDesai)
|
||||
- [abci] [\#2127](https://github.com/tendermint/tendermint/issues/2127)RequestCheckTx has a new field, `CheckTxType`, which can take values of `CheckTxType_New` and `CheckTxType_Recheck`, indicating whether this is a new tx being checked for the first time or whether this tx is being rechecked after a block commit. This allows applications to skip certain expensive operations, like signature checking, if they've already been done once. see [docs](https://github.com/tendermint/tendermint/blob/eddb433d7c082efbeaf8974413a36641519ee895/docs/spec/abci/apps.md#mempool-connection)
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [rpc] [\#3700](https://github.com/tendermint/tendermint/issues/3700) Make possible to set absolute paths for TLS cert and key (@climber73)
|
||||
- [abci] [\#3513](https://github.com/tendermint/tendermint/issues/3513) Call the reqRes callback after the resCb so they always happen in the same order
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Prevent "sent next PEX request too soon" errors by not calling
|
||||
ensurePeers outside of ensurePeersRoutine
|
||||
- [behaviour] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
|
||||
- [config] [\#3723](https://github.com/tendermint/tendermint/issues/3723) Add consensus_params to testnet config generation; document time_iota_ms (@ashleyvega)
|
||||
|
||||
|
||||
## v0.32.0
|
||||
|
||||
*June 25, 2019*
|
||||
@ -21,29 +65,29 @@ program](https://hackerone.com/tendermint).
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
- [cli] \#3613 Switch from golang/dep to Go Modules to resolve dependencies:
|
||||
- [cli] [\#3613](https://github.com/tendermint/tendermint/issues/3613) Switch from golang/dep to Go Modules to resolve dependencies:
|
||||
It is recommended to switch to Go Modules if your project has tendermint as
|
||||
a dependency. Read more on Modules here:
|
||||
https://github.com/golang/go/wiki/Modules
|
||||
- [config] [\#3632](https://github.com/tendermint/tendermint/pull/3632) Removed `leveldb` as generic
|
||||
option for `db_backend`. Must be `goleveldb` or `cleveldb`.
|
||||
- [rpc] \#3616 Fix field names for `/block_results` response (eg. `results.DeliverTx`
|
||||
- [rpc] [\#3616](https://github.com/tendermint/tendermint/issues/3616) Fix field names for `/block_results` response (eg. `results.DeliverTx`
|
||||
-> `results.deliver_tx`). See docs for details.
|
||||
- [rpc] \#3724 RPC now binds to `127.0.0.1` by default instead of `0.0.0.0`
|
||||
- [rpc] [\#3724](https://github.com/tendermint/tendermint/issues/3724) RPC now binds to `127.0.0.1` by default instead of `0.0.0.0`
|
||||
|
||||
* Apps
|
||||
- [abci] \#1859 `ResponseCheckTx`, `ResponseDeliverTx`, `ResponseBeginBlock`,
|
||||
- [abci] [\#1859](https://github.com/tendermint/tendermint/issues/1859) `ResponseCheckTx`, `ResponseDeliverTx`, `ResponseBeginBlock`,
|
||||
and `ResponseEndBlock` now include `Events` instead of `Tags`. Each `Event`
|
||||
contains a `type` and a list of `attributes` (list of key-value pairs)
|
||||
allowing for inclusion of multiple distinct events in each response.
|
||||
|
||||
* Go API
|
||||
- [abci] \#3193 Use RequestDeliverTx and RequestCheckTx in the ABCI
|
||||
- [abci] [\#3193](https://github.com/tendermint/tendermint/issues/3193) Use RequestDeliverTx and RequestCheckTx in the ABCI
|
||||
Application interface
|
||||
- [libs/db] [\#3632](https://github.com/tendermint/tendermint/pull/3632) Removed deprecated `LevelDBBackend` const
|
||||
If you have `db_backend` set to `leveldb` in your config file, please
|
||||
change it to `goleveldb` or `cleveldb`.
|
||||
- [p2p] \#3521 Remove NewNetAddressStringWithOptionalID
|
||||
- [p2p] [\#3521](https://github.com/tendermint/tendermint/issues/3521) Remove NewNetAddressStringWithOptionalID
|
||||
|
||||
* Blockchain Protocol
|
||||
|
||||
@ -52,16 +96,16 @@ program](https://hackerone.com/tendermint).
|
||||
### FEATURES:
|
||||
|
||||
### IMPROVEMENTS:
|
||||
- [abci/examples] \#3659 Change validator update tx format in the `persistent_kvstore` to use base64 for pubkeys instead of hex (@needkane)
|
||||
- [consensus] \#3656 Exit if SwitchToConsensus fails
|
||||
- [p2p] \#3666 Add per channel telemetry to improve reactor observability
|
||||
- [abci/examples] [\#3659](https://github.com/tendermint/tendermint/issues/3659) Change validator update tx format in the `persistent_kvstore` to use base64 for pubkeys instead of hex (@needkane)
|
||||
- [consensus] [\#3656](https://github.com/tendermint/tendermint/issues/3656) Exit if SwitchToConsensus fails
|
||||
- [p2p] [\#3666](https://github.com/tendermint/tendermint/issues/3666) Add per channel telemetry to improve reactor observability
|
||||
- [rpc] [\#3686](https://github.com/tendermint/tendermint/pull/3686) `HTTPClient#Call` returns wrapped errors, so a caller could use `errors.Cause` to retrieve an error code. (@wooparadog)
|
||||
|
||||
### BUG FIXES:
|
||||
- [libs/db] \#3717 Fixed the BoltDB backend's Batch.Delete implementation (@Yawning)
|
||||
- [libs/db] \#3718 Fixed the BoltDB backend's Get and Iterator implementation (@Yawning)
|
||||
- [node] \#3716 Fix a bug where `nil` is recorded as node's address
|
||||
- [node] \#3741 Fix profiler blocking the entire node
|
||||
- [libs/db] [\#3717](https://github.com/tendermint/tendermint/issues/3717) Fixed the BoltDB backend's Batch.Delete implementation (@Yawning)
|
||||
- [libs/db] [\#3718](https://github.com/tendermint/tendermint/issues/3718) Fixed the BoltDB backend's Get and Iterator implementation (@Yawning)
|
||||
- [node] [\#3716](https://github.com/tendermint/tendermint/issues/3716) Fix a bug where `nil` is recorded as node's address
|
||||
- [node] [\#3741](https://github.com/tendermint/tendermint/issues/3741) Fix profiler blocking the entire node
|
||||
|
||||
## v0.31.7
|
||||
|
||||
@ -72,11 +116,11 @@ The regression caused the invalid committed txs to be proposed in blocks over an
|
||||
over again.
|
||||
|
||||
### BUG FIXES:
|
||||
- [mempool] \#3699 Remove all committed txs from the mempool.
|
||||
- [mempool] [\#3699](https://github.com/tendermint/tendermint/issues/3699) Remove all committed txs from the mempool.
|
||||
This reverts the change from v0.31.6 where we only remove valid txs from the mempool.
|
||||
Note this means malicious proposals can cause txs to be dropped from the
|
||||
mempools of other nodes by including them in blocks before they are valid.
|
||||
See \#3322.
|
||||
See [\#3322](https://github.com/tendermint/tendermint/issues/3322).
|
||||
|
||||
## v0.31.6
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
## v0.32.1
|
||||
## v0.32.2
|
||||
|
||||
**
|
||||
\*\*
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
@ -9,7 +9,7 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
- CLI/RPC/Config
|
||||
- [rpc] `/block_results` response format updated (see RPC docs for details)
|
||||
```
|
||||
{
|
||||
@ -24,33 +24,17 @@ program](https://hackerone.com/tendermint).
|
||||
"consensus_param_updates": null
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
* Apps
|
||||
- Apps
|
||||
|
||||
* Go API
|
||||
- [abci] \#2127 ABCI / mempool: Add a "Recheck Tx" indicator. Breaks the ABCI
|
||||
client interface (`abcicli.Client`) to allow for supplying the ABCI
|
||||
`types.RequestCheckTx` and `types.RequestDeliverTx` structs, and lets the
|
||||
mempool indicate to the ABCI app whether a CheckTx request is a recheck or
|
||||
not.
|
||||
- [libs] Remove unused `db/debugDB` and `common/colors.go` & `errors/errors.go` files (@marbar3778)
|
||||
|
||||
* Blockchain Protocol
|
||||
|
||||
* P2P Protocol
|
||||
- Go API
|
||||
- [libs] \#3811 Remove `db` from libs in favor of `https://github.com/tendermint/tm-cmn`
|
||||
|
||||
### FEATURES:
|
||||
- [node] Refactor `NewNode` to use functional options to make it more flexible
|
||||
and extensible in the future.
|
||||
- [node] [\#3730](https://github.com/tendermint/tendermint/pull/3730) Add `CustomReactors` option to `NewNode` allowing caller to pass
|
||||
custom reactors to run inside Tendermint node (@ParthDesai)
|
||||
|
||||
### IMPROVEMENTS:
|
||||
- [rpc] \#3700 Make possible to set absolute paths for TLS cert and key (@climber73)
|
||||
|
||||
- [abci] \#3809 Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
|
||||
|
||||
### BUG FIXES:
|
||||
- [p2p] \#3338 Prevent "sent next PEX request too soon" errors by not calling
|
||||
ensurePeers outside of ensurePeersRoutine
|
||||
- [behaviour] Return correct reason in MessageOutOfOrder (@jim380)
|
||||
|
||||
|
@ -1,34 +0,0 @@
|
||||
FROM alpine:3.7
|
||||
|
||||
ENV DATA_ROOT /tendermint
|
||||
ENV TMHOME $DATA_ROOT
|
||||
|
||||
RUN addgroup tmuser && \
|
||||
adduser -S -G tmuser tmuser
|
||||
|
||||
RUN mkdir -p $DATA_ROOT && \
|
||||
chown -R tmuser:tmuser $DATA_ROOT
|
||||
|
||||
RUN apk add --no-cache bash curl jq
|
||||
|
||||
ENV GOPATH /go
|
||||
ENV PATH "$PATH:/go/bin"
|
||||
RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
|
||||
apk add --no-cache go build-base git && \
|
||||
cd /go/src/github.com/tendermint/tendermint && \
|
||||
git clone https://github.com/tendermint/tendermint . && \
|
||||
git checkout develop && \
|
||||
make get_tools && \
|
||||
make install && \
|
||||
cd - && \
|
||||
rm -rf /go/src/github.com/tendermint/tendermint && \
|
||||
apk del go build-base git
|
||||
|
||||
VOLUME $DATA_ROOT
|
||||
|
||||
EXPOSE 26656
|
||||
EXPOSE 26657
|
||||
|
||||
ENTRYPOINT ["tendermint"]
|
||||
|
||||
CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"]
|
@ -12,28 +12,25 @@
|
||||
- `0.9.1`, `0.9`, [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
|
||||
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
|
||||
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
|
||||
- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)
|
||||
|
||||
`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
|
||||
|
||||
## Quick reference
|
||||
|
||||
* **Where to get help:**
|
||||
https://cosmos.network/community
|
||||
- **Where to get help:**
|
||||
[cosmos.network/ecosystem](https://cosmos.network/ecosystem)
|
||||
|
||||
* **Where to file issues:**
|
||||
https://github.com/tendermint/tendermint/issues
|
||||
- **Where to file issues:**
|
||||
[Tendermint Issues](https://github.com/tendermint/tendermint/issues)
|
||||
|
||||
* **Supported Docker versions:**
|
||||
- **Supported Docker versions:**
|
||||
[the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)
|
||||
|
||||
## Tendermint
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
|
||||
|
||||
For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html).
|
||||
For more background, see the [the docs](https://tendermint.com/docs/introduction/#quick-start).
|
||||
|
||||
To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html).
|
||||
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/introduction/quick-start.html).
|
||||
|
||||
## How to use this image
|
||||
|
||||
@ -48,7 +45,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app
|
||||
|
||||
## Local cluster
|
||||
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
|
||||
|
||||
```
|
||||
make build-linux
|
||||
@ -60,7 +57,7 @@ Note that this will build and use a different image than the ones provided here.
|
||||
|
||||
## License
|
||||
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
|
||||
|
||||
## Contributing
|
||||
|
||||
|
@ -228,18 +228,22 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response)
|
||||
reqres.Done() // Release waiters
|
||||
reqres.SetDone() // so reqRes.SetCallback will run the callback
|
||||
|
||||
// go routine for callbacks
|
||||
// goroutine for callbacks
|
||||
go func() {
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(res)
|
||||
}
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
// Notify client listener if set
|
||||
if cli.resCb != nil {
|
||||
cli.resCb(reqres.Request, res)
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(res)
|
||||
}
|
||||
}()
|
||||
|
||||
return reqres
|
||||
}
|
||||
|
||||
|
@ -9,8 +9,8 @@ import (
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -9,8 +9,8 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -146,6 +146,16 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
|
||||
func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) {
|
||||
var count int
|
||||
var bufReader = bufio.NewReader(conn)
|
||||
|
||||
defer func() {
|
||||
// make sure to recover from any app-related panics to allow proper socket cleanup
|
||||
r := recover()
|
||||
if r != nil {
|
||||
closeConn <- fmt.Errorf("recovered from panic: %v", r)
|
||||
s.appMtx.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
|
||||
var req = &types.Request{}
|
||||
@ -154,7 +164,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, respo
|
||||
if err == io.EOF {
|
||||
closeConn <- err
|
||||
} else {
|
||||
closeConn <- fmt.Errorf("Error reading message: %v", err.Error())
|
||||
closeConn <- fmt.Errorf("error reading message: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -11,7 +11,6 @@ import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@ -19,6 +18,7 @@ import (
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
var config *cfg.Config
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"sync"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
@ -11,10 +11,11 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tm-cmn/db"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/db"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
|
||||
|
@ -161,9 +161,10 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Generate genesis doc from generated validators
|
||||
genDoc := &types.GenesisDoc{
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: "chain-" + cmn.RandStr(6),
|
||||
Validators: genVals,
|
||||
ChainID: "chain-" + cmn.RandStr(6),
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
GenesisTime: tmtime.Now(),
|
||||
Validators: genVals,
|
||||
}
|
||||
|
||||
// Write genesis file.
|
||||
|
@ -24,7 +24,6 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
@ -33,6 +32,7 @@ import (
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -11,10 +11,10 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
// for testing
|
||||
|
@ -19,13 +19,13 @@ import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/mock"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
//----------------------------------------------
|
||||
|
@ -13,8 +13,8 @@ import (
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
//auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
|
@ -10,11 +10,11 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
|
@ -22,7 +22,6 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
@ -31,6 +30,7 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -15,13 +15,13 @@ import (
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
|
||||
|
@ -6,14 +6,12 @@ The documentation for Tendermint Core is hosted at:
|
||||
- https://tendermint-staging.interblock.io/docs/
|
||||
|
||||
built from the files in this (`/docs`) directory for
|
||||
[master](https://github.com/tendermint/tendermint/tree/master/docs)
|
||||
and [develop](https://github.com/tendermint/tendermint/tree/develop/docs),
|
||||
respectively.
|
||||
[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively.
|
||||
|
||||
## How It Works
|
||||
|
||||
There is a CircleCI job listening for changes in the `/docs` directory, on both
|
||||
the `master` and `develop` branches. Any updates to files in this directory
|
||||
the `master` branch. Any updates to files in this directory
|
||||
on those branches will automatically trigger a website deployment. Under the hood,
|
||||
the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo.
|
||||
|
||||
@ -35,7 +33,7 @@ of the sidebar.
|
||||
**NOTE:** Strongly consider the existing links - both within this directory
|
||||
and to the website docs - when moving or deleting files.
|
||||
|
||||
Links to directories *MUST* end in a `/`.
|
||||
Links to directories _MUST_ end in a `/`.
|
||||
|
||||
Relative links should be used nearly everywhere, having discovered and weighed the following:
|
||||
|
||||
@ -101,4 +99,4 @@ We are using [Algolia](https://www.algolia.com) to power full-text search. This
|
||||
## Consistency
|
||||
|
||||
Because the build processes are identical (as is the information contained herein), this file should be kept in sync as
|
||||
much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/DOCS_README.md).
|
||||
much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/master/docs/DOCS_README.md).
|
||||
|
@ -62,7 +62,7 @@ as `abci-cli` above. The kvstore just stores transactions in a merkle
|
||||
tree.
|
||||
|
||||
Its code can be found
|
||||
[here](https://github.com/tendermint/tendermint/blob/develop/abci/cmd/abci-cli/abci-cli.go)
|
||||
[here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go)
|
||||
and looks like:
|
||||
|
||||
```
|
||||
@ -137,7 +137,7 @@ response.
|
||||
|
||||
The server may be generic for a particular language, and we provide a
|
||||
[reference implementation in
|
||||
Golang](https://github.com/tendermint/tendermint/tree/develop/abci/server). See the
|
||||
Golang](https://github.com/tendermint/tendermint/tree/master/abci/server). See the
|
||||
[list of other ABCI implementations](./ecosystem.md) for servers in
|
||||
other languages.
|
||||
|
||||
@ -324,7 +324,7 @@ But the ultimate flexibility comes from being able to write the
|
||||
application easily in any language.
|
||||
|
||||
We have implemented the counter in a number of languages [see the
|
||||
example directory](https://github.com/tendermint/tendermint/tree/develop/abci/example).
|
||||
example directory](https://github.com/tendermint/tendermint/tree/master/abci/example).
|
||||
|
||||
To run the Node.js version, fist download & install [the Javascript ABCI server](https://github.com/tendermint/js-abci):
|
||||
|
||||
|
@ -48,9 +48,9 @@ open ABCI connection with the application, which hosts an ABCI server.
|
||||
Shown are the request and response types sent on each connection.
|
||||
|
||||
Most of the examples below are from [kvstore
|
||||
application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/kvstore.go),
|
||||
application](https://github.com/tendermint/tendermint/blob/master/abci/example/kvstore/kvstore.go),
|
||||
which is a part of the abci repo. [persistent_kvstore
|
||||
application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/persistent_kvstore.go)
|
||||
application](https://github.com/tendermint/tendermint/blob/master/abci/example/kvstore/persistent_kvstore.go)
|
||||
is used to show `BeginBlock`, `EndBlock` and `InitChain` example
|
||||
implementations.
|
||||
|
||||
|
@ -2,10 +2,7 @@
|
||||
|
||||
## Changelog
|
||||
|
||||
016-08-2018: Follow up from review:
|
||||
- Revert changes to commit round
|
||||
- Remind about justification for removing pubkey
|
||||
- Update pros/cons
|
||||
016-08-2018: Follow up from review: - Revert changes to commit round - Remind about justification for removing pubkey - Update pros/cons
|
||||
05-08-2018: Initial draft
|
||||
|
||||
## Context
|
||||
@ -35,11 +32,11 @@ message ValidatorUpdate {
|
||||
}
|
||||
```
|
||||
|
||||
As noted in ADR-009[https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-009-ABCI-design.md],
|
||||
As noted in ADR-009[https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-009-ABCI-design.md],
|
||||
the `Validator` does not contain a pubkey because quantum public keys are
|
||||
quite large and it would be wasteful to send them all over ABCI with every block.
|
||||
Thus, applications that want to take advantage of the information in BeginBlock
|
||||
are *required* to store pubkeys in state (or use much less efficient lazy means
|
||||
are _required_ to store pubkeys in state (or use much less efficient lazy means
|
||||
of verifying BeginBlock data).
|
||||
|
||||
### RequestBeginBlock
|
||||
|
391
docs/architecture/adr-043-blockchain-riri-org.md
Normal file
391
docs/architecture/adr-043-blockchain-riri-org.md
Normal file
@ -0,0 +1,391 @@
|
||||
# ADR 043: Blockhchain Reactor Riri-Org
|
||||
|
||||
## Changelog
|
||||
* 18-06-2019: Initial draft
|
||||
* 08-07-2019: Reviewed
|
||||
|
||||
## Context
|
||||
|
||||
The blockchain reactor is responsible for two high level processes:sending/receiving blocks from peers and FastSync-ing blocks to catch upnode who is far behind. The goal of [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) was to refactor these two processes by separating business logic currently wrapped up in go-channels into pure `handle*` functions. While the ADR specified what the final form of the reactor might look like it lacked guidance on intermediary steps to get there.
|
||||
The following diagram illustrates the state of the [blockchain-reorg](https://github.com/tendermint/tendermint/pull/35610) reactor which will be referred to as `v1`.
|
||||
|
||||

|
||||
|
||||
While `v1` of the blockchain reactor has shown significant improvements in terms of simplifying the concurrency model, the current PR has run into few roadblocks.
|
||||
|
||||
* The current PR large and difficult to review.
|
||||
* Block gossiping and fast sync processes are highly coupled to the shared `Pool` data structure.
|
||||
* Peer communication is spread over multiple components creating complex dependency graph which must be mocked out during testing.
|
||||
* Timeouts modeled as stateful tickers introduce non-determinism in tests
|
||||
|
||||
This ADR is meant to specify the missing components and control necessary to achieve [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md).
|
||||
|
||||
## Decision
|
||||
|
||||
Partition the responsibilities of the blockchain reactor into a set of components which communicate exclusively with events. Events will contain timestamps allowing each component to track time as internal state. The internal state will be mutated by a set of `handle*` which will produce event(s). The integration between components will happen in the reactor and reactor tests will then become integration tests between components. This design will be known as `v2`.
|
||||
|
||||

|
||||
|
||||
### Reactor changes in detail
|
||||
|
||||
The reactor will include a demultiplexing routine which will send each message to each sub routine for independent processing. Each sub routine will then select the messages it's interested in and call the handle specific function specified in [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md). The demuxRoutine acts as "pacemaker" setting the time in which events are expected to be handled.
|
||||
|
||||
|
||||
```go
|
||||
func demuxRoutine(msgs, scheduleMsgs, processorMsgs, ioMsgs) {
|
||||
timer := time.NewTicker(interval)
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
now := evTimeCheck{time.Now()}
|
||||
schedulerMsgs <- now
|
||||
processorMsgs <- now
|
||||
ioMsgs <- now
|
||||
case msg:= <- msgs:
|
||||
msg.time = time.Now()
|
||||
// These channels should produce backpressure before
|
||||
// being full to avoid starving each other
|
||||
schedulerMsgs <- msg
|
||||
processorMsgs <- msg
|
||||
ioMesgs <- msg
|
||||
if msg == stop {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func processRoutine(input chan Message, output chan Message) {
|
||||
processor := NewProcessor(..)
|
||||
for {
|
||||
msg := <- input
|
||||
switch msg := msg.(type) {
|
||||
case bcBlockRequestMessage:
|
||||
output <- processor.handleBlockRequest(msg))
|
||||
...
|
||||
case stop:
|
||||
processor.stop()
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
func scheduleRoutine(input chan Message, output chan Message) {
|
||||
schelduer = NewScheduler(...)
|
||||
for {
|
||||
msg := <-msgs
|
||||
switch msg := input.(type) {
|
||||
case bcBlockResponseMessage:
|
||||
output <- scheduler.handleBlockResponse(msg)
|
||||
...
|
||||
case stop:
|
||||
schedule.stop()
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Lifecycle management
|
||||
|
||||
A set of routines for individual processes allow processes to run in parallel with clear lifecycle management. `Start`, `Stop`, and `AddPeer` hooks currently present in the reactor will delegate to the sub-routines allowing them to manage internal state independent without further coupling to the reactor.
|
||||
|
||||
```go
|
||||
func (r *BlockChainReactor) Start() {
|
||||
r.msgs := make(chan Message, maxInFlight)
|
||||
schedulerMsgs := make(chan Message)
|
||||
processorMsgs := make(chan Message)
|
||||
ioMsgs := make(chan Message)
|
||||
|
||||
go processorRoutine(processorMsgs, r.msgs)
|
||||
go scheduleRoutine(schedulerMsgs, r.msgs)
|
||||
go ioRoutine(ioMsgs, r.msgs)
|
||||
...
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) Receive(...) {
|
||||
...
|
||||
r.msgs <- msg
|
||||
...
|
||||
}
|
||||
|
||||
func (r *BlockchainReactor) Stop() {
|
||||
...
|
||||
r.msgs <- stop
|
||||
...
|
||||
}
|
||||
|
||||
...
|
||||
func (r *BlockchainReactor) Stop() {
|
||||
...
|
||||
r.msgs <- stop
|
||||
...
|
||||
}
|
||||
...
|
||||
|
||||
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
...
|
||||
r.msgs <- bcAddPeerEv{peer.ID}
|
||||
...
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## IO handling
|
||||
An io handling routine within the reactor will isolate peer communication. Messages going through the ioRoutine will usually be one way, using `p2p` APIs. In the case in which a `p2p` API such as `trySend` returns an error, the ioRoutine can funnel those messages back to the demuxRoutine for distribution to the other routines. For instance, errors from the ioRoutine can be consumed by the scheduler to inform better peer selection implementations.
|
||||
|
||||
```go
|
||||
func (r *BlockchainReacor) ioRoutine(ioMesgs chan Message, outMsgs chan Message) {
|
||||
...
|
||||
for {
|
||||
msg := <-ioMsgs
|
||||
switch msg := msg.(type) {
|
||||
case scBlockRequestMessage:
|
||||
queued := r.sendBlockRequestToPeer(...)
|
||||
if queued {
|
||||
outMsgs <- ioSendQueued{...}
|
||||
}
|
||||
case scStatusRequestMessage
|
||||
r.sendStatusRequestToPeer(...)
|
||||
case bcPeerError
|
||||
r.Swtich.StopPeerForError(msg.src)
|
||||
...
|
||||
...
|
||||
case bcFinished
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
### Processor Internals
|
||||
|
||||
The processor is responsible for ordering, verifying and executing blocks. The Processor will maintain an internal cursor `height` referring to the last processed block. As a set of blocks arrive unordered, the Processor will check if it has the block at `height+1` necessary to process the next block. The processor also maintains the map `blockPeers` of peers to height, to keep track of which peer provided the block at `height`. `blockPeers` can be used in `handleRemovePeer(...)` to reschedule all unprocessed blocks provided by a peer who has errored.
|
||||
|
||||
```go
|
||||
type Processor struct {
|
||||
height int64 // the height cursor
|
||||
state ...
|
||||
blocks [height]*Block // keep a set of blocks in memory until they are processed
|
||||
blockPeers [height]PeerID // keep track of which heights came from which peerID
|
||||
lastTouch timestamp
|
||||
}
|
||||
|
||||
func (proc *Processor) handleBlockResponse(peerID, block) {
|
||||
if block.height <= height || block[block.height] {
|
||||
} else if blocks[block.height] {
|
||||
return errDuplicateBlock{}
|
||||
} else {
|
||||
blocks[block.height] = block
|
||||
}
|
||||
|
||||
if blocks[height] && blocks[height+1] {
|
||||
... = state.Validators.VerifyCommit(...)
|
||||
... = store.SaveBlock(...)
|
||||
state, err = blockExec.ApplyBlock(...)
|
||||
...
|
||||
if err == nil {
|
||||
delete blocks[height]
|
||||
height++
|
||||
lastTouch = msg.time
|
||||
return pcBlockProcessed{height-1}
|
||||
} else {
|
||||
... // Delete all unprocessed block from the peer
|
||||
return pcBlockProcessError{peerID, height}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (proc *Processor) handleRemovePeer(peerID) {
|
||||
events = []
|
||||
// Delete all unprocessed blocks from peerID
|
||||
for i = height; i < len(blocks); i++ {
|
||||
if blockPeers[i] == peerID {
|
||||
events = append(events, pcBlockReschedule{height})
|
||||
|
||||
delete block[height]
|
||||
}
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
func handleTimeCheckEv(time) {
|
||||
if time - lastTouch > timeout {
|
||||
// Timeout the processor
|
||||
...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Schedule
|
||||
|
||||
The Schedule maintains the internal state used for scheduling blockRequestMessages based on some scheduling algorithm. The schedule needs to maintain state on:
|
||||
|
||||
* The state `blockState` of every block seen up to the height `maxHeight`
|
||||
* The set of peers and their peer state `peerState`
|
||||
* which peers have which blocks
|
||||
* which blocks have been requested from which peers
|
||||
|
||||
```go
|
||||
type blockState int
|
||||
|
||||
const (
|
||||
blockStateNew = iota
|
||||
blockStatePending,
|
||||
blockStateReceived,
|
||||
blockStateProcessed
|
||||
)
|
||||
|
||||
type schedule {
|
||||
// a list of blocks in which blockState
|
||||
blockStates map[height]blockState
|
||||
|
||||
// a map of which blocks are available from which peers
|
||||
blockPeers map[height]map[p2p.ID]scPeer
|
||||
|
||||
// a map of peerID to schedule specific peer struct `scPeer`
|
||||
peers map[p2p.ID]scPeer
|
||||
|
||||
// a map of heights to the peer we are waiting for a response from
|
||||
pending map[height]scPeer
|
||||
|
||||
targetPending int // the number of blocks we want in blockStatePending
|
||||
targetReceived int // the number of blocks we want in blockStateReceived
|
||||
|
||||
peerTimeout int
|
||||
peerMinSpeed int
|
||||
}
|
||||
|
||||
func (sc *schedule) numBlockInState(state blockState) uint32 {
|
||||
num := 0
|
||||
for i := sc.minHeight(); i <= sc.maxHeight(); i++ {
|
||||
if sc.blockState[i] == state {
|
||||
num++
|
||||
}
|
||||
}
|
||||
return num
|
||||
}
|
||||
|
||||
|
||||
func (sc *schedule) popSchedule(maxRequest int) []scBlockRequestMessage {
|
||||
// We only want to schedule requests such that we have less than sc.targetPending and sc.targetReceived
|
||||
// This ensures we don't saturate the network or flood the processor with unprocessed blocks
|
||||
todo := min(sc.targetPending - sc.numBlockInState(blockStatePending), sc.numBlockInState(blockStateReceived))
|
||||
events := []scBlockRequestMessage{}
|
||||
for i := sc.minHeight(); i < sc.maxMaxHeight(); i++ {
|
||||
if todo == 0 {
|
||||
break
|
||||
}
|
||||
if blockStates[i] == blockStateNew {
|
||||
peer = sc.selectPeer(blockPeers[i])
|
||||
sc.blockStates[i] = blockStatePending
|
||||
sc.pending[i] = peer
|
||||
events = append(events, scBlockRequestMessage{peerID: peer.peerID, height: i})
|
||||
todo--
|
||||
}
|
||||
}
|
||||
return events
|
||||
}
|
||||
...
|
||||
|
||||
type scPeer struct {
|
||||
peerID p2p.ID
|
||||
numOustandingRequest int
|
||||
lastTouched time.Time
|
||||
monitor flow.Monitor
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
# Scheduler
|
||||
The scheduler is configured to maintain a target `n` of in flight
|
||||
messages and will use feedback from `_blockResponseMessage`,
|
||||
`_statusResponseMessage` and `_peerError` to produce an optimal assignment
|
||||
of scBlockRequestMessage at each `timeCheckEv`.
|
||||
|
||||
```
|
||||
|
||||
func handleStatusResponse(peerID, height, time) {
|
||||
schedule.touchPeer(peerID, time)
|
||||
schedule.setPeerHeight(peerID, height)
|
||||
}
|
||||
|
||||
func handleBlockResponseMessage(peerID, height, block, time) {
|
||||
schedule.touchPeer(peerID, time)
|
||||
schedule.markReceived(peerID, height, size(block))
|
||||
}
|
||||
|
||||
func handleNoBlockResponseMessage(peerID, height, time) {
|
||||
schedule.touchPeer(peerID, time)
|
||||
// reschedule that block, punish peer...
|
||||
...
|
||||
}
|
||||
|
||||
func handlePeerError(peerID) {
|
||||
// Remove the peer, reschedule the requests
|
||||
...
|
||||
}
|
||||
|
||||
func handleTimeCheckEv(time) {
|
||||
// clean peer list
|
||||
|
||||
events = []
|
||||
for peerID := range schedule.peersNotTouchedSince(time) {
|
||||
pending = schedule.pendingFrom(peerID)
|
||||
schedule.setPeerState(peerID, timedout)
|
||||
schedule.resetBlocks(pending)
|
||||
events = append(events, peerTimeout{peerID})
|
||||
}
|
||||
|
||||
events = append(events, schedule.popSchedule())
|
||||
|
||||
return events
|
||||
}
|
||||
```
|
||||
|
||||
## Peer
|
||||
The Peer stores per-peer state based on messages received by the scheduler.
|
||||
|
||||
```go
|
||||
type Peer struct {
|
||||
lastTouched timestamp
|
||||
lastDownloaded timestamp
|
||||
pending map[height]struct{}
|
||||
height height // max height for the peer
|
||||
state {
|
||||
pending, // we know the peer but not the height
|
||||
active, // we know the height
|
||||
timeout // the peer has timed out
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
Work in progress
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
* Tests become deterministic
|
||||
* Simulation becomes atemporal: no need to wait for a wall-time timeout
|
||||
* Peer Selection can be independently tested/simulated
|
||||
* Develop a general approach to refactoring reactors
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
|
||||
### Implementation Path
|
||||
|
||||
* Implement the scheduler, test the scheduler, review the scheduler
|
||||
* Implement the processor, test the processor, review the processor
|
||||
* Implement the demuxer, write integration test, review integration tests
|
||||
|
||||
## References
|
||||
|
||||
|
||||
* [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md): The original blockchain reactor re-org proposal
|
||||
* [Blockchain re-org](https://github.com/tendermint/tendermint/pull/3561): The current blockchain reactor re-org implementation (v1)
|
BIN
docs/architecture/img/blockchain-reactor-v1.png
Normal file
BIN
docs/architecture/img/blockchain-reactor-v1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 121 KiB |
BIN
docs/architecture/img/blockchain-reactor-v2.png
Normal file
BIN
docs/architecture/img/blockchain-reactor-v2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 118 KiB |
630
docs/guides/go-built-in.md
Normal file
630
docs/guides/go-built-in.md
Normal file
@ -0,0 +1,630 @@
|
||||
# 1 Guide Assumptions
|
||||
|
||||
This guide is designed for beginners who want to get started with a Tendermint
|
||||
Core application from scratch. It does not assume that you have any prior
|
||||
experience with Tendermint Core.
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
|
||||
transition machine - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
|
||||
Although Tendermint Core is written in the Golang programming language, prior
|
||||
knowledge of it is not required for this guide. You can learn it as we go due
|
||||
to its simplicity. However, you may want to go through [Learn X in Y minutes
|
||||
Where X=Go](https://learnxinyminutes.com/docs/go/) first to familiarize
|
||||
yourself with the syntax.
|
||||
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store.
|
||||
|
||||
# 1 Creating a built-in application in Go
|
||||
|
||||
Running your application inside the same process as Tendermint Core will give
|
||||
you the best possible performance.
|
||||
|
||||
For other languages, your application has to communicate with Tendermint Core
|
||||
through a TCP, Unix domain socket or gRPC.
|
||||
|
||||
## 1.1 Installing Go
|
||||
|
||||
Please refer to [the official guide for installing
|
||||
Go](https://golang.org/doc/install).
|
||||
|
||||
Verify that you have the latest version of Go installed:
|
||||
|
||||
```sh
|
||||
$ go version
|
||||
go version go1.12.7 darwin/amd64
|
||||
```
|
||||
|
||||
Make sure you have `$GOPATH` environment variable set:
|
||||
|
||||
```sh
|
||||
$ echo $GOPATH
|
||||
/Users/melekes/go
|
||||
```
|
||||
|
||||
## 1.2 Creating a new Go project
|
||||
|
||||
We'll start by creating a new Go project.
|
||||
|
||||
```sh
|
||||
$ mkdir -p $GOPATH/src/github.com/me/kvstore
|
||||
$ cd $GOPATH/src/github.com/me/kvstore
|
||||
```
|
||||
|
||||
Inside the example directory create a `main.go` file with the following content:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println("Hello, Tendermint Core")
|
||||
}
|
||||
```
|
||||
|
||||
When run, this should print "Hello, Tendermint Core" to the standard output.
|
||||
|
||||
```sh
|
||||
$ go run main.go
|
||||
Hello, Tendermint Core
|
||||
```
|
||||
|
||||
## 1.3 Writing a Tendermint Core application
|
||||
|
||||
Tendermint Core communicates with the application through the Application
|
||||
BlockChain Interface (ABCI). All message types are defined in the [protobuf
|
||||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
This allows Tendermint Core to run applications written in any programming
|
||||
language.
|
||||
|
||||
Create a file called `app.go` with the following content:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
type KVStoreApplication struct {}
|
||||
|
||||
var _ abcitypes.Application = (*KVStoreApplication)(nil)
|
||||
|
||||
func NewKVStoreApplication() *KVStoreApplication {
|
||||
return &KVStoreApplication{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo {
|
||||
return abcitypes.ResponseInfo{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) SetOption(req abcitypes.RequestSetOption) abcitypes.ResponseSetOption {
|
||||
return abcitypes.ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
|
||||
return abcitypes.ResponseDeliverTx{Code: 0}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
|
||||
return abcitypes.ResponseCheckTx{Code: 0}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) Commit() abcitypes.ResponseCommit {
|
||||
return abcitypes.ResponseCommit{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery {
|
||||
return abcitypes.ResponseQuery{Code: 0}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) InitChain(req abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
|
||||
return abcitypes.ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
|
||||
return abcitypes.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
|
||||
return abcitypes.ResponseEndBlock{}
|
||||
}
|
||||
```
|
||||
|
||||
Now I will go through each method explaining when it's called and adding
|
||||
required business logic.
|
||||
|
||||
### 1.3.1 CheckTx
|
||||
|
||||
When a new transaction is added to the Tendermint Core, it will ask the
|
||||
application to check it (validate the format, signatures, etc.).
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) isValid(tx []byte) (code uint32) {
|
||||
// check format
|
||||
parts := bytes.Split(tx, []byte("="))
|
||||
if len(parts) != 2 {
|
||||
return 1
|
||||
}
|
||||
|
||||
key, value := parts[0], parts[1]
|
||||
|
||||
// check if the same key=value already exists
|
||||
err := app.db.View(func(txn *badger.Txn) error {
|
||||
item, err := txn.Get(key)
|
||||
if err != nil && err != badger.ErrKeyNotFound {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
return item.Value(func(val []byte) error {
|
||||
if bytes.Equal(val, value) {
|
||||
code = 2
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
||||
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
|
||||
code := app.isValid(req.Tx)
|
||||
return abcitypes.ResponseCheckTx{Code: code, GasWanted: 1}
|
||||
}
|
||||
```
|
||||
|
||||
Don't worry if this does not compile yet.
|
||||
|
||||
If the transaction does not have a form of `{bytes}={bytes}`, we return `1`
|
||||
code. When the same key=value already exists (same key and value), we return `2`
|
||||
code. For others, we return a zero code indicating that they are valid.
|
||||
|
||||
Note that anything with non-zero code will be considered invalid (`-1`, `100`,
|
||||
etc.) by Tendermint Core.
|
||||
|
||||
Valid transactions will eventually be committed given they are not too big and
|
||||
have enough gas. To learn more about gas, check out ["the
|
||||
specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
|
||||
|
||||
For the underlying key-value store we'll use
|
||||
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
|
||||
persistent and fast key-value (KV) database.
|
||||
|
||||
```go
|
||||
import "github.com/dgraph-io/badger"
|
||||
|
||||
type KVStoreApplication struct {
|
||||
db *badger.DB
|
||||
currentBatch *badger.Txn
|
||||
}
|
||||
|
||||
func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
|
||||
return &KVStoreApplication{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
|
||||
|
||||
When Tendermint Core has decided on the block, it is transferred to the
|
||||
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
|
||||
`EndBlock` in the end. DeliverTx messages are transferred asynchronously, but the
|
||||
responses are expected to come in order.
|
||||
|
||||
```
|
||||
func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
|
||||
app.currentBatch = app.db.NewTransaction(true)
|
||||
return abcitypes.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Here we create a batch, which will store block's transactions.
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
|
||||
code := app.isValid(req.Tx)
|
||||
if code != 0 {
|
||||
return abcitypes.ResponseDeliverTx{Code: code}
|
||||
}
|
||||
|
||||
parts := bytes.Split(req.Tx, []byte("="))
|
||||
key, value := parts[0], parts[1]
|
||||
|
||||
err := app.currentBatch.Set(key, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return abcitypes.ResponseDeliverTx{Code: 0}
|
||||
}
|
||||
```
|
||||
|
||||
If the transaction is badly formatted or the same key=value already exists, we
|
||||
again return the non-zero code. Otherwise, we add it to the current batch.
|
||||
|
||||
In the current design, a block can include incorrect transactions (those who
|
||||
passed CheckTx, but failed DeliverTx or transactions included by the proposer
|
||||
directly). This is done for performance reasons.
|
||||
|
||||
Note we can't commit transactions inside the `DeliverTx` because in such case
|
||||
`Query`, which may be called in parallel, will return inconsistent data (i.e.
|
||||
it will report that some value already exists even when the actual block was not
|
||||
yet committed).
|
||||
|
||||
`Commit` instructs the application to persist the new state.
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
|
||||
app.currentBatch.Commit()
|
||||
return abcitypes.ResponseCommit{Data: []byte{}}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.3 Query
|
||||
|
||||
Now, when the client wants to know whether a particular key/value exists, it
|
||||
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call
|
||||
the application's `Query` method.
|
||||
|
||||
Applications are free to provide their own APIs. But by using Tendermint Core
|
||||
as a proxy, clients (including [light client
|
||||
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
|
||||
the unified API across different applications. Plus they won't have to call the
|
||||
otherwise separate Tendermint Core API for additional proofs.
|
||||
|
||||
Note we don't include a proof here.
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery abcitypes.ResponseQuery) {
|
||||
resQuery.Key = reqQuery.Data
|
||||
err := app.db.View(func(txn *badger.Txn) error {
|
||||
item, err := txn.Get(reqQuery.Data)
|
||||
if err != nil && err != badger.ErrKeyNotFound {
|
||||
return err
|
||||
}
|
||||
if err == badger.ErrKeyNotFound {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
return item.Value(func(val []byte) error {
|
||||
resQuery.Log = "exists"
|
||||
resQuery.Value = val
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
The complete specification can be found
|
||||
[here](https://tendermint.com/docs/spec/abci/).
|
||||
|
||||
## 1.4 Starting an application and a Tendermint Core instance in the same process
|
||||
|
||||
Put the following code into the "main.go" file:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/dgraph-io/badger"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
nm "github.com/tendermint/tendermint/node"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
)
|
||||
|
||||
var configFile string
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&configFile, "config", "$HOME/.tendermint/config/config.toml", "Path to config.toml")
|
||||
}
|
||||
|
||||
func main() {
|
||||
db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
app := NewKVStoreApplication(db)
|
||||
|
||||
flag.Parse()
|
||||
|
||||
node, err := newTendermint(app, configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
node.Start()
|
||||
defer func() {
|
||||
node.Stop()
|
||||
node.Wait()
|
||||
}()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
<-c
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func newTendermint(app abci.Application, configFile string) (*nm.Node, error) {
|
||||
// read config
|
||||
config := cfg.DefaultConfig()
|
||||
config.RootDir = filepath.Dir(filepath.Dir(configFile))
|
||||
viper.SetConfigFile(configFile)
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return nil, errors.Wrap(err, "viper failed to read config file")
|
||||
}
|
||||
if err := viper.Unmarshal(config); err != nil {
|
||||
return nil, errors.Wrap(err, "viper failed to unmarshal config")
|
||||
}
|
||||
if err := config.ValidateBasic(); err != nil {
|
||||
return nil, errors.Wrap(err, "config is invalid")
|
||||
}
|
||||
|
||||
// create logger
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
var err error
|
||||
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse log level")
|
||||
}
|
||||
|
||||
// read private validator
|
||||
pv := privval.LoadFilePV(
|
||||
config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(),
|
||||
)
|
||||
|
||||
// read node key
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load node's key")
|
||||
}
|
||||
|
||||
// create node
|
||||
node, err := nm.NewNode(
|
||||
config,
|
||||
pv,
|
||||
nodeKey,
|
||||
proxy.NewLocalClientCreator(app),
|
||||
nm.DefaultGenesisDocProviderFunc(config),
|
||||
nm.DefaultDBProvider,
|
||||
nm.DefaultMetricsProvider(config.Instrumentation),
|
||||
logger)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create new Tendermint node")
|
||||
}
|
||||
|
||||
return node, nil
|
||||
}
|
||||
```
|
||||
|
||||
This is a huge blob of code, so let's break it down into pieces.
|
||||
|
||||
First, we initialize the Badger database and create an app instance:
|
||||
|
||||
```go
|
||||
db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
app := NewKVStoreApplication(db)
|
||||
```
|
||||
|
||||
Then we use it to create a Tendermint Core `Node` instance:
|
||||
|
||||
```go
|
||||
flag.Parse()
|
||||
|
||||
node, err := newTendermint(app, configFile)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
...
|
||||
|
||||
// create node
|
||||
node, err := nm.NewNode(
|
||||
config,
|
||||
pv,
|
||||
nodeKey,
|
||||
proxy.NewLocalClientCreator(app),
|
||||
nm.DefaultGenesisDocProviderFunc(config),
|
||||
nm.DefaultDBProvider,
|
||||
nm.DefaultMetricsProvider(config.Instrumentation),
|
||||
logger)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create new Tendermint node")
|
||||
}
|
||||
```
|
||||
|
||||
`NewNode` requires a few things including a configuration file, a private
|
||||
validator, a node key and a few others in order to construct the full node.
|
||||
|
||||
Note we use `proxy.NewLocalClientCreator` here to create a local client instead
|
||||
of one communicating through a socket or gRPC.
|
||||
|
||||
[viper](https://github.com/spf13/viper) is being used for reading the config,
|
||||
which we will generate later using the `tendermint init` command.
|
||||
|
||||
```go
|
||||
config := cfg.DefaultConfig()
|
||||
config.RootDir = filepath.Dir(filepath.Dir(configFile))
|
||||
viper.SetConfigFile(configFile)
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return nil, errors.Wrap(err, "viper failed to read config file")
|
||||
}
|
||||
if err := viper.Unmarshal(config); err != nil {
|
||||
return nil, errors.Wrap(err, "viper failed to unmarshal config")
|
||||
}
|
||||
if err := config.ValidateBasic(); err != nil {
|
||||
return nil, errors.Wrap(err, "config is invalid")
|
||||
}
|
||||
```
|
||||
|
||||
We use `FilePV`, which is a private validator (i.e. thing which signs consensus
|
||||
messages). Normally, you would use `SignerRemote` to connect to an external
|
||||
[HSM](https://kb.certus.one/hsm.html).
|
||||
|
||||
```go
|
||||
pv := privval.LoadFilePV(
|
||||
config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(),
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
`nodeKey` is needed to identify the node in a p2p network.
|
||||
|
||||
```go
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load node's key")
|
||||
}
|
||||
```
|
||||
|
||||
As for the logger, we use the built-in library, which provides a nice
|
||||
abstraction over [go-kit's
|
||||
logger](https://github.com/go-kit/kit/tree/master/log).
|
||||
|
||||
```go
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
var err error
|
||||
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse log level")
|
||||
}
|
||||
```
|
||||
|
||||
Finally, we start the node and add some signal handling to gracefully stop it
|
||||
upon receiving SIGTERM or Ctrl-C.
|
||||
|
||||
```go
|
||||
node.Start()
|
||||
defer func() {
|
||||
node.Stop()
|
||||
node.Wait()
|
||||
}()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
<-c
|
||||
os.Exit(0)
|
||||
```
|
||||
|
||||
## 1.5 Getting Up and Running
|
||||
|
||||
We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
|
||||
dependency management.
|
||||
|
||||
```sh
|
||||
$ export GO111MODULE=on
|
||||
$ go mod init github.com/me/example
|
||||
$ go build
|
||||
```
|
||||
|
||||
This should build the binary.
|
||||
|
||||
To create a default configuration, nodeKey and private validator files, let's
|
||||
execute `tendermint init`. But before we do that, we will need to install
|
||||
Tendermint Core.
|
||||
|
||||
```sh
|
||||
$ rm -rf /tmp/example
|
||||
$ cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
$ make install
|
||||
$ TMHOME="/tmp/example" tendermint init
|
||||
|
||||
I[2019-07-16|18:40:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
|
||||
I[2019-07-16|18:40:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
|
||||
I[2019-07-16|18:40:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
|
||||
```
|
||||
|
||||
We are ready to start our application:
|
||||
|
||||
```sh
|
||||
$ ./example -config "/tmp/example/config/config.toml"
|
||||
|
||||
badger 2019/07/16 18:42:25 INFO: All 0 tables opened in 0s
|
||||
badger 2019/07/16 18:42:25 INFO: Replaying file id: 0 at offset: 0
|
||||
badger 2019/07/16 18:42:25 INFO: Replay took: 695.227s
|
||||
E[2019-07-16|18:42:25.818] Couldn't connect to any seeds module=p2p
|
||||
I[2019-07-16|18:42:26.853] Executed block module=state height=1 validTxs=0 invalidTxs=0
|
||||
I[2019-07-16|18:42:26.865] Committed state module=state height=1 txs=0 appHash=
|
||||
```
|
||||
|
||||
Now open another tab in your terminal and try sending a transaction:
|
||||
|
||||
```sh
|
||||
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"check_tx": {
|
||||
"gasWanted": "1"
|
||||
},
|
||||
"deliver_tx": {},
|
||||
"hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6",
|
||||
"height": "128"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Response should contain the height where this transaction was committed.
|
||||
|
||||
Now let's check if the given key now exists and its value:
|
||||
|
||||
```
|
||||
$ curl -s 'localhost:26657/abci_query?data="tendermint"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"response": {
|
||||
"log": "exists",
|
||||
"key": "dGVuZGVybWludA==",
|
||||
"value": "cm9ja3M="
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
|
||||
"tendermint" and "rocks" accordingly.
|
514
docs/guides/go.md
Normal file
514
docs/guides/go.md
Normal file
@ -0,0 +1,514 @@
|
||||
# 1 Guide Assumptions
|
||||
|
||||
This guide is designed for beginners who want to get started with a Tendermint
|
||||
Core application from scratch. It does not assume that you have any prior
|
||||
experience with Tendermint Core.
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
|
||||
transition machine - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
|
||||
Although Tendermint Core is written in the Golang programming language, prior
|
||||
knowledge of it is not required for this guide. You can learn it as we go due
|
||||
to its simplicity. However, you may want to go through [Learn X in Y minutes
|
||||
Where X=Go](https://learnxinyminutes.com/docs/go/) first to familiarize
|
||||
yourself with the syntax.
|
||||
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store.
|
||||
|
||||
# 1 Creating an application in Go
|
||||
|
||||
To get maximum performance it is better to run your application alongside
|
||||
Tendermint Core. [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written
|
||||
this way. Please refer to [Writing a built-in Tendermint Core application in
|
||||
Go](./go-built-in.md) guide for details.
|
||||
|
||||
Having a separate application might give you better security guarantees as two
|
||||
processes would be communicating via established binary protocol. Tendermint
|
||||
Core will not have access to application's state.
|
||||
|
||||
## 1.1 Installing Go
|
||||
|
||||
Please refer to [the official guide for installing
|
||||
Go](https://golang.org/doc/install).
|
||||
|
||||
Verify that you have the latest version of Go installed:
|
||||
|
||||
```sh
|
||||
$ go version
|
||||
go version go1.12.7 darwin/amd64
|
||||
```
|
||||
|
||||
Make sure you have `$GOPATH` environment variable set:
|
||||
|
||||
```sh
|
||||
$ echo $GOPATH
|
||||
/Users/melekes/go
|
||||
```
|
||||
|
||||
## 1.2 Creating a new Go project
|
||||
|
||||
We'll start by creating a new Go project.
|
||||
|
||||
```sh
|
||||
$ mkdir -p $GOPATH/src/github.com/me/kvstore
|
||||
$ cd $GOPATH/src/github.com/me/kvstore
|
||||
```
|
||||
|
||||
Inside the example directory create a `main.go` file with the following content:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println("Hello, Tendermint Core")
|
||||
}
|
||||
```
|
||||
|
||||
When run, this should print "Hello, Tendermint Core" to the standard output.
|
||||
|
||||
```sh
|
||||
$ go run main.go
|
||||
Hello, Tendermint Core
|
||||
```
|
||||
|
||||
## 1.3 Writing a Tendermint Core application
|
||||
|
||||
Tendermint Core communicates with the application through the Application
|
||||
BlockChain Interface (ABCI). All message types are defined in the [protobuf
|
||||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
This allows Tendermint Core to run applications written in any programming
|
||||
language.
|
||||
|
||||
Create a file called `app.go` with the following content:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
type KVStoreApplication struct {}
|
||||
|
||||
var _ abcitypes.Application = (*KVStoreApplication)(nil)
|
||||
|
||||
func NewKVStoreApplication() *KVStoreApplication {
|
||||
return &KVStoreApplication{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo {
|
||||
return abcitypes.ResponseInfo{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) SetOption(req abcitypes.RequestSetOption) abcitypes.ResponseSetOption {
|
||||
return abcitypes.ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
|
||||
return abcitypes.ResponseDeliverTx{Code: 0}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
|
||||
return abcitypes.ResponseCheckTx{Code: 0}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) Commit() abcitypes.ResponseCommit {
|
||||
return abcitypes.ResponseCommit{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.ResponseQuery {
|
||||
return abcitypes.ResponseQuery{Code: 0}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) InitChain(req abcitypes.RequestInitChain) abcitypes.ResponseInitChain {
|
||||
return abcitypes.ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
|
||||
return abcitypes.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock {
|
||||
return abcitypes.ResponseEndBlock{}
|
||||
}
|
||||
```
|
||||
|
||||
Now I will go through each method explaining when it's called and adding
|
||||
required business logic.
|
||||
|
||||
### 1.3.1 CheckTx
|
||||
|
||||
When a new transaction is added to Tendermint Core, it will ask the
|
||||
application to check it (validate the format, signatures, etc.).
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) isValid(tx []byte) (code uint32) {
|
||||
// check format
|
||||
parts := bytes.Split(tx, []byte("="))
|
||||
if len(parts) != 2 {
|
||||
return 1
|
||||
}
|
||||
|
||||
key, value := parts[0], parts[1]
|
||||
|
||||
// check if the same key=value already exists
|
||||
err := app.db.View(func(txn *badger.Txn) error {
|
||||
item, err := txn.Get(key)
|
||||
if err != nil && err != badger.ErrKeyNotFound {
|
||||
return err
|
||||
}
|
||||
if err == nil {
|
||||
return item.Value(func(val []byte) error {
|
||||
if bytes.Equal(val, value) {
|
||||
code = 2
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
||||
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
|
||||
code := app.isValid(req.Tx)
|
||||
return abcitypes.ResponseCheckTx{Code: code, GasWanted: 1}
|
||||
}
|
||||
```
|
||||
|
||||
Don't worry if this does not compile yet.
|
||||
|
||||
If the transaction does not have a form of `{bytes}={bytes}`, we return `1`
|
||||
code. When the same key=value already exists (same key and value), we return `2`
|
||||
code. For others, we return a zero code indicating that they are valid.
|
||||
|
||||
Note that anything with non-zero code will be considered invalid (`-1`, `100`,
|
||||
etc.) by Tendermint Core.
|
||||
|
||||
Valid transactions will eventually be committed given they are not too big and
|
||||
have enough gas. To learn more about gas, check out ["the
|
||||
specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
|
||||
|
||||
For the underlying key-value store we'll use
|
||||
[badger](https://github.com/dgraph-io/badger), which is an embeddable,
|
||||
persistent and fast key-value (KV) database.
|
||||
|
||||
```go
|
||||
import "github.com/dgraph-io/badger"
|
||||
|
||||
type KVStoreApplication struct {
|
||||
db *badger.DB
|
||||
currentBatch *badger.Txn
|
||||
}
|
||||
|
||||
func NewKVStoreApplication(db *badger.DB) *KVStoreApplication {
|
||||
return &KVStoreApplication{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.2 BeginBlock -> DeliverTx -> EndBlock -> Commit
|
||||
|
||||
When Tendermint Core has decided on the block, it's transferred to the
|
||||
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
|
||||
`EndBlock` in the end. DeliverTx are being transferred asynchronously, but the
|
||||
responses are expected to come in order.
|
||||
|
||||
```
|
||||
func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock {
|
||||
app.currentBatch = app.db.NewTransaction(true)
|
||||
return abcitypes.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Here we create a batch, which will store block's transactions.
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx {
|
||||
code := app.isValid(req.Tx)
|
||||
if code != 0 {
|
||||
return abcitypes.ResponseDeliverTx{Code: code}
|
||||
}
|
||||
|
||||
parts := bytes.Split(req.Tx, []byte("="))
|
||||
key, value := parts[0], parts[1]
|
||||
|
||||
err := app.currentBatch.Set(key, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return abcitypes.ResponseDeliverTx{Code: 0}
|
||||
}
|
||||
```
|
||||
|
||||
If the transaction is badly formatted or the same key=value already exists, we
|
||||
again return the non-zero code. Otherwise, we add it to the current batch.
|
||||
|
||||
In the current design, a block can include incorrect transactions (those that
|
||||
passed CheckTx, but failed DeliverTx or transactions included by the proposer
|
||||
directly). This is done for performance reasons.
|
||||
|
||||
Note we can't commit transactions inside the `DeliverTx` because in such case
|
||||
`Query`, which may be called in parallel, will return inconsistent data (i.e.
|
||||
it will report that some value already exists even when the actual block was not
|
||||
yet committed).
|
||||
|
||||
`Commit` instructs the application to persist the new state.
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit {
|
||||
app.currentBatch.Commit()
|
||||
return abcitypes.ResponseCommit{Data: []byte{}}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.3 Query
|
||||
|
||||
Now, when the client wants to know whether a particular key/value exists, it
|
||||
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call
|
||||
the application's `Query` method.
|
||||
|
||||
Applications are free to provide their own APIs. But by using Tendermint Core
|
||||
as a proxy, clients (including [light client
|
||||
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
|
||||
the unified API across different applications. Plus they won't have to call the
|
||||
otherwise separate Tendermint Core API for additional proofs.
|
||||
|
||||
Note we don't include a proof here.
|
||||
|
||||
```go
|
||||
func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery abcitypes.ResponseQuery) {
|
||||
resQuery.Key = reqQuery.Data
|
||||
err := app.db.View(func(txn *badger.Txn) error {
|
||||
item, err := txn.Get(reqQuery.Data)
|
||||
if err != nil && err != badger.ErrKeyNotFound {
|
||||
return err
|
||||
}
|
||||
if err == badger.ErrKeyNotFound {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
return item.Value(func(val []byte) error {
|
||||
resQuery.Log = "exists"
|
||||
resQuery.Value = val
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
The complete specification can be found
|
||||
[here](https://tendermint.com/docs/spec/abci/).
|
||||
|
||||
## 1.4 Starting an application and a Tendermint Core instances
|
||||
|
||||
Put the following code into the "main.go" file:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/dgraph-io/badger"
|
||||
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var socketAddr string
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&socketAddr, "socket-addr", "unix://example.sock", "Unix domain socket address")
|
||||
}
|
||||
|
||||
func main() {
|
||||
db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
app := NewKVStoreApplication(db)
|
||||
|
||||
flag.Parse()
|
||||
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
server := abciserver.NewSocketServer(socketAddr, app)
|
||||
server.SetLogger(logger)
|
||||
if err := server.Start(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer server.Stop()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
<-c
|
||||
os.Exit(0)
|
||||
}
|
||||
```
|
||||
|
||||
This is a huge blob of code, so let's break it down into pieces.
|
||||
|
||||
First, we initialize the Badger database and create an app instance:
|
||||
|
||||
```go
|
||||
db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to open badger db: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
app := NewKVStoreApplication(db)
|
||||
```
|
||||
|
||||
Then we start the ABCI server and add some signal handling to gracefully stop
|
||||
it upon receiving SIGTERM or Ctrl-C. Tendermint Core will act as a client,
|
||||
which connects to our server and sends us transactions and other messages.
|
||||
|
||||
```go
|
||||
server := abciserver.NewSocketServer(socketAddr, app)
|
||||
server.SetLogger(logger)
|
||||
if err := server.Start(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error starting socket server: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer server.Stop()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
<-c
|
||||
os.Exit(0)
|
||||
```
|
||||
|
||||
## 1.5 Getting Up and Running
|
||||
|
||||
We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for
|
||||
dependency management.
|
||||
|
||||
```sh
|
||||
$ export GO111MODULE=on
|
||||
$ go mod init github.com/me/example
|
||||
$ go build
|
||||
```
|
||||
|
||||
This should build the binary.
|
||||
|
||||
To create a default configuration, nodeKey and private validator files, let's
|
||||
execute `tendermint init`. But before we do that, we will need to install
|
||||
Tendermint Core.
|
||||
|
||||
```sh
|
||||
$ rm -rf /tmp/example
|
||||
$ cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
$ make install
|
||||
$ TMHOME="/tmp/example" tendermint init
|
||||
|
||||
I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
|
||||
I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
|
||||
I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
|
||||
```
|
||||
|
||||
Feel free to explore the generated files, which can be found at
|
||||
`/tmp/example/config` directory. Documentation on the config can be found
|
||||
[here](https://tendermint.com/docs/tendermint-core/configuration.html).
|
||||
|
||||
We are ready to start our application:
|
||||
|
||||
```sh
|
||||
$ rm example.sock
|
||||
$ ./example
|
||||
|
||||
badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s
|
||||
badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0
|
||||
badger 2019/07/16 18:25:11 INFO: Replay took: 300.4s
|
||||
I[2019-07-16|18:25:11.523] Starting ABCIServer impl=ABCIServ
|
||||
```
|
||||
|
||||
Then we need to start Tendermint Core and point it to our application. Staying
|
||||
within the application directory, execute:
|
||||
|
||||
```sh
|
||||
$ TMHOME="/tmp/example" tendermint node --proxy_app=unix://example.sock
|
||||
|
||||
I[2019-07-16|18:26:20.362] Version info module=main software=0.32.1 block=10 p2p=7
|
||||
I[2019-07-16|18:26:20.383] Starting Node module=main impl=Node
|
||||
E[2019-07-16|18:26:20.392] Couldn't connect to any seeds module=p2p
|
||||
I[2019-07-16|18:26:20.394] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:8dab80770ae8e295d4ce905d86af78c4ff634b79 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-nIO96P Version:0.32.1 Channels:4020212223303800 Moniker:app48.fun-box.ru Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
|
||||
I[2019-07-16|18:26:21.440] Executed block module=state height=1 validTxs=0 invalidTxs=0
|
||||
I[2019-07-16|18:26:21.446] Committed state module=state height=1 txs=0 appHash=
|
||||
```
|
||||
|
||||
This should start the full node and connect to our ABCI application.
|
||||
|
||||
```
|
||||
I[2019-07-16|18:25:11.525] Waiting for new connection...
|
||||
I[2019-07-16|18:26:20.329] Accepted a new connection
|
||||
I[2019-07-16|18:26:20.329] Waiting for new connection...
|
||||
I[2019-07-16|18:26:20.330] Accepted a new connection
|
||||
I[2019-07-16|18:26:20.330] Waiting for new connection...
|
||||
I[2019-07-16|18:26:20.330] Accepted a new connection
|
||||
```
|
||||
|
||||
Now open another tab in your terminal and try sending a transaction:
|
||||
|
||||
```sh
|
||||
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"check_tx": {
|
||||
"gasWanted": "1"
|
||||
},
|
||||
"deliver_tx": {},
|
||||
"hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
|
||||
"height": "33"
|
||||
}
}
|
||||
```
|
||||
|
||||
Response should contain the height where this transaction was committed.
|
||||
|
||||
Now let's check if the given key now exists and its value:
|
||||
|
||||
```
|
||||
$ curl -s 'localhost:26657/abci_query?data="tendermint"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"response": {
|
||||
"log": "exists",
|
||||
"key": "dGVuZGVybWludA==",
|
||||
"value": "cm9ja3My"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
|
||||
"tendermint" and "rocks", respectively.
|
@ -1,9 +1,9 @@
|
||||
# Install Tendermint
|
||||
|
||||
The fastest and easiest way to install the `tendermint` binary
|
||||
is to run [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_ubuntu.sh) on
|
||||
is to run [this script](https://github.com/tendermint/tendermint/blob/master/scripts/install/install_tendermint_ubuntu.sh) on
|
||||
a fresh Ubuntu instance,
|
||||
or [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh)
|
||||
or [this script](https://github.com/tendermint/tendermint/blob/master/scripts/install/install_tendermint_bsd.sh)
|
||||
on a fresh FreeBSD instance. Read the comments / instructions carefully (i.e., reset your terminal after running the script,
|
||||
make sure you are okay with the network connections being made).
|
||||
|
||||
|
@ -122,7 +122,7 @@ consensus engine, and provides a particular application state.
|
||||
## ABCI Overview
|
||||
|
||||
The [Application BlockChain Interface
|
||||
(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
|
||||
(ABCI)](https://github.com/tendermint/tendermint/tree/master/abci)
|
||||
allows for Byzantine Fault Tolerant replication of applications
|
||||
written in any programming language.
|
||||
|
||||
@ -190,7 +190,7 @@ core to the application. The application replies with corresponding
|
||||
response messages.
|
||||
|
||||
The messages are specified here: [ABCI Message
|
||||
Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
|
||||
Types](https://github.com/tendermint/tendermint/blob/master/abci/README.md#message-types).
|
||||
|
||||
The **DeliverTx** message is the work horse of the application. Each
|
||||
transaction in the blockchain is delivered with this message. The
|
||||
|
@ -116,7 +116,7 @@ consensus engine, and provides a particular application state.
|
||||
## ABCI Overview
|
||||
|
||||
The [Application BlockChain Interface
|
||||
(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
|
||||
(ABCI)](https://github.com/tendermint/tendermint/tree/master/abci)
|
||||
allows for Byzantine Fault Tolerant replication of applications
|
||||
written in any programming language.
|
||||
|
||||
@ -184,7 +184,7 @@ core to the application. The application replies with corresponding
|
||||
response messages.
|
||||
|
||||
The messages are specified here: [ABCI Message
|
||||
Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
|
||||
Types](https://github.com/tendermint/tendermint/blob/master/abci/README.md#message-types).
|
||||
|
||||
The **DeliverTx** message is the work horse of the application. Each
|
||||
transaction in the blockchain is delivered with this message. The
|
||||
|
@ -78,9 +78,9 @@ cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
rm -rf ./build/node*
|
||||
```
|
||||
|
||||
## Configuring abci containers
|
||||
## Configuring abci containers
|
||||
|
||||
To use your own abci applications with 4-node setup edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/develop/docker-compose.yml) file and add image to your abci application.
|
||||
To use your own abci applications with 4-node setup edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/master/docker-compose.yml) file and add image to your abci application.
|
||||
|
||||
```
|
||||
abci0:
|
||||
@ -129,7 +129,7 @@ To use your own abci applications with 4-node setup edit the [docker-compose.yam
|
||||
|
||||
```
|
||||
|
||||
Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to its abci.
|
||||
Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to its abci.
|
||||
|
||||
```
|
||||
node0:
|
||||
|
@ -8,7 +8,7 @@ testnets on those servers.
|
||||
## Install
|
||||
|
||||
NOTE: see the [integration bash
|
||||
script](https://github.com/tendermint/tendermint/blob/develop/networks/remote/integration.sh)
|
||||
script](https://github.com/tendermint/tendermint/blob/master/networks/remote/integration.sh)
|
||||
that can be run on a fresh DO droplet and will automatically spin up a 4
|
||||
node testnet. The script more or less does everything described below.
|
||||
|
||||
|
@ -2,11 +2,11 @@
|
||||
|
||||
ABCI is the interface between Tendermint (a state-machine replication engine)
|
||||
and your application (the actual state machine). It consists of a set of
|
||||
*methods*, where each method has a corresponding `Request` and `Response`
|
||||
_methods_, where each method has a corresponding `Request` and `Response`
|
||||
message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*`
|
||||
messages and receiving the `Response*` messages in return.
|
||||
|
||||
All message types are defined in a [protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
All message types are defined in a [protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto).
|
||||
This allows Tendermint to run applications written in any programming language.
|
||||
|
||||
This specification is split as follows:
|
||||
|
@ -3,9 +3,9 @@
|
||||
## Overview
|
||||
|
||||
The ABCI message types are defined in a [protobuf
|
||||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto).
|
||||
|
||||
ABCI methods are split across 3 separate ABCI *connections*:
|
||||
ABCI methods are split across 3 separate ABCI _connections_:
|
||||
|
||||
- `Consensus Connection`: `InitChain, BeginBlock, DeliverTx, EndBlock, Commit`
|
||||
- `Mempool Connection`: `CheckTx`
|
||||
@ -85,7 +85,7 @@ Example:
|
||||
cmn.KVPair{Key: []byte("amount"), Value: []byte("...")},
|
||||
cmn.KVPair{Key: []byte("reason"), Value: []byte("...")},
|
||||
},
|
||||
},
|
||||
},
|
||||
// ...
|
||||
},
|
||||
}
|
||||
@ -115,19 +115,19 @@ non-determinism must be fixed and the nodes restarted.
|
||||
Sources of non-determinism in applications may include:
|
||||
|
||||
- Hardware failures
|
||||
- Cosmic rays, overheating, etc.
|
||||
- Cosmic rays, overheating, etc.
|
||||
- Node-dependent state
|
||||
- Random numbers
|
||||
- Time
|
||||
- Random numbers
|
||||
- Time
|
||||
- Underspecification
|
||||
- Library version changes
|
||||
- Race conditions
|
||||
- Floating point numbers
|
||||
- JSON serialization
|
||||
- Iterating through hash-tables/maps/dictionaries
|
||||
- Library version changes
|
||||
- Race conditions
|
||||
- Floating point numbers
|
||||
- JSON serialization
|
||||
- Iterating through hash-tables/maps/dictionaries
|
||||
- External Sources
|
||||
- Filesystem
|
||||
- Network calls (eg. some external REST API service)
|
||||
- Filesystem
|
||||
- Network calls (eg. some external REST API service)
|
||||
|
||||
See [#56](https://github.com/tendermint/abci/issues/56) for original discussion.
|
||||
|
||||
@ -240,9 +240,9 @@ Commit are included in the header of the next block.
|
||||
- `Path (string)`: Path of request, like an HTTP GET path. Can be
|
||||
used with or in lieu of Data.
|
||||
- Apps MUST interpret '/store' as a query by key on the
|
||||
underlying store. The key SHOULD be specified in the Data field.
|
||||
underlying store. The key SHOULD be specified in the Data field.
|
||||
- Apps SHOULD allow queries over specific types like
|
||||
'/accounts/...' or '/votes/...'
|
||||
'/accounts/...' or '/votes/...'
|
||||
- `Height (int64)`: The block height for which you want the query
|
||||
(default=0 returns data for the latest committed block). Note
|
||||
that this is the height of the block containing the
|
||||
@ -269,7 +269,7 @@ Commit are included in the header of the next block.
|
||||
- Query for data from the application at current or past height.
|
||||
- Optionally return Merkle proof.
|
||||
- Merkle proof includes self-describing `type` field to support many types
|
||||
of Merkle trees and encoding formats.
|
||||
of Merkle trees and encoding formats.
|
||||
|
||||
### BeginBlock
|
||||
|
||||
@ -297,11 +297,9 @@ Commit are included in the header of the next block.
|
||||
- **Request**:
|
||||
- `Tx ([]byte)`: The request transaction bytes
|
||||
- `Type (CheckTxType)`: What type of `CheckTx` request is this? At present,
|
||||
there are two possible values: `CheckTx_Unchecked` (the default, which says
|
||||
that a full check is required), and `CheckTx_Checked` (when the mempool is
|
||||
there are two possible values: `CheckTx_New` (the default, which says
|
||||
that a full check is required), and `CheckTx_Recheck` (when the mempool is
|
||||
initiating a normal recheck of a transaction).
|
||||
- `AdditionalData ([]byte)`: Reserved for future use. See
|
||||
[here](https://github.com/tendermint/tendermint/issues/2127#issuecomment-456661420).
|
||||
- **Response**:
|
||||
- `Code (uint32)`: Response code
|
||||
- `Data ([]byte)`: Result bytes, if any.
|
||||
@ -486,7 +484,7 @@ Commit are included in the header of the next block.
|
||||
- `Votes ([]VoteInfo)`: List of validators addresses in the last validator set
|
||||
with their voting power and whether or not they signed a vote.
|
||||
|
||||
### ConsensusParams
|
||||
### ConsensusParams
|
||||
|
||||
- **Fields**:
|
||||
- `Block (BlockParams)`: Parameters limiting the size of a block and time between consecutive blocks.
|
||||
@ -500,17 +498,17 @@ Commit are included in the header of the next block.
|
||||
- `MaxBytes (int64)`: Max size of a block, in bytes.
|
||||
- `MaxGas (int64)`: Max sum of `GasWanted` in a proposed block.
|
||||
- NOTE: blocks that violate this may be committed if there are Byzantine proposers.
|
||||
It's the application's responsibility to handle this when processing a
|
||||
block!
|
||||
It's the application's responsibility to handle this when processing a
|
||||
block!
|
||||
|
||||
### EvidenceParams
|
||||
|
||||
- **Fields**:
|
||||
- `MaxAge (int64)`: Max age of evidence, in blocks. Evidence older than this
|
||||
is considered stale and ignored.
|
||||
- This should correspond with an app's "unbonding period" or other
|
||||
similar mechanism for handling Nothing-At-Stake attacks.
|
||||
- NOTE: this should change to time (instead of blocks)!
|
||||
- This should correspond with an app's "unbonding period" or other
|
||||
similar mechanism for handling Nothing-At-Stake attacks.
|
||||
- NOTE: this should change to time (instead of blocks)!
|
||||
|
||||
### ValidatorParams
|
||||
|
||||
@ -532,4 +530,3 @@ Commit are included in the header of the next block.
|
||||
- `Type (string)`: Type of Merkle proof and how it's encoded.
|
||||
- `Key ([]byte)`: Key in the Merkle tree that this proof is for.
|
||||
- `Data ([]byte)`: Encoded Merkle proof for the key.
|
||||
|
||||
|
@ -65,7 +65,10 @@ begin.
|
||||
After `Commit`, CheckTx is run again on all transactions that remain in the
|
||||
node's local mempool after filtering those included in the block. To prevent the
|
||||
mempool from rechecking all transactions every time a block is committed, set
|
||||
the configuration option `mempool.recheck=false`.
|
||||
the configuration option `mempool.recheck=false`. As of Tendermint v0.32.1,
|
||||
an additional `Type` parameter is made available to the CheckTx function that
|
||||
indicates whether an incoming transaction is new (`CheckTxType_New`), or a
|
||||
recheck (`CheckTxType_Recheck`).
|
||||
|
||||
Finally, the mempool will unlock and new transactions can be processed through CheckTx again.
|
||||
|
||||
|
@ -9,7 +9,7 @@ Applications](./apps.md).
|
||||
## Message Protocol
|
||||
|
||||
The message protocol consists of pairs of requests and responses defined in the
|
||||
[protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
[protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto).
|
||||
|
||||
Some messages have no fields, while others may include byte-arrays, strings, integers,
|
||||
or custom protobuf types.
|
||||
@ -33,9 +33,9 @@ The latter two can be tested using the `abci-cli` by setting the `--abci` flag
|
||||
appropriately (ie. to `socket` or `grpc`).
|
||||
|
||||
See examples, in various stages of maintenance, in
|
||||
[Go](https://github.com/tendermint/tendermint/tree/develop/abci/server),
|
||||
[Go](https://github.com/tendermint/tendermint/tree/master/abci/server),
|
||||
[JavaScript](https://github.com/tendermint/js-abci),
|
||||
[Python](https://github.com/tendermint/tendermint/tree/develop/abci/example/python3/abci),
|
||||
[Python](https://github.com/tendermint/tendermint/tree/master/abci/example/python3/abci),
|
||||
[C++](https://github.com/mdyring/cpp-tmsp), and
|
||||
[Java](https://github.com/jTendermint/jabci).
|
||||
|
||||
@ -44,14 +44,13 @@ See examples, in various stages of maintenance, in
|
||||
The simplest implementation uses function calls within Golang.
|
||||
This means ABCI applications written in Golang can be compiled with TendermintCore and run as a single binary.
|
||||
|
||||
|
||||
### GRPC
|
||||
|
||||
If GRPC is available in your language, this is the easiest approach,
|
||||
though it will have significant performance overhead.
|
||||
|
||||
To get started with GRPC, copy in the [protobuf
|
||||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto)
|
||||
file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
|
||||
and compile it using the GRPC plugin for your language. For instance,
|
||||
for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`.
|
||||
See the [grpc documentation for more details](http://www.grpc.io/docs/).
|
||||
@ -107,4 +106,4 @@ received or a block is committed.
|
||||
|
||||
It is unlikely that you will need to implement a client. For details of
|
||||
our client, see
|
||||
[here](https://github.com/tendermint/tendermint/tree/develop/abci/client).
|
||||
[here](https://github.com/tendermint/tendermint/tree/master/abci/client).
|
||||
|
@ -59,20 +59,20 @@ familiar with amino encoding.
|
||||
You can simply use below table and concatenate Prefix || Length (of raw bytes) || raw bytes
|
||||
( while || stands for byte concatenation here).
|
||||
|
||||
| Type | Name | Prefix | Length | Notes |
|
||||
| ------------------ | ----------------------------- | ---------- | -------- | ----- |
|
||||
| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
|
||||
| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
|
||||
| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
|
||||
| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
|
||||
| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | |
|
||||
| Type | Name | Prefix | Length | Notes |
|
||||
| ----------------------- | ---------------------------------- | ---------- | -------- | ----- |
|
||||
| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
|
||||
| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
|
||||
| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
|
||||
| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
|
||||
| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | |
|
||||
|
||||
### Example
|
||||
|
||||
For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey
|
||||
`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
|
||||
would be encoded as
|
||||
`EB5AE98721020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
|
||||
`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
|
||||
would be encoded as
|
||||
`EB5AE98721020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
|
||||
|
||||
### Key Types
|
||||
|
||||
@ -170,11 +170,11 @@ We use the RFC 6962 specification of a merkle tree, with sha256 as the hash func
|
||||
Merkle trees are used throughout Tendermint to compute a cryptographic digest of a data structure.
|
||||
The differences between RFC 6962 and the simplest form a merkle tree are that:
|
||||
|
||||
1) leaf nodes and inner nodes have different hashes.
|
||||
1. leaf nodes and inner nodes have different hashes.
|
||||
This is for "second pre-image resistance", to prevent the proof to an inner node being valid as the proof of a leaf.
|
||||
The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`.
|
||||
|
||||
2) When the number of items isn't a power of two, the left half of the tree is as big as it could be.
|
||||
2. When the number of items isn't a power of two, the left half of the tree is as big as it could be.
|
||||
(The largest power of two less than the number of items) This allows new leaves to be added with less
|
||||
recomputation. For example:
|
||||
|
||||
@ -290,7 +290,7 @@ func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byt
|
||||
|
||||
### IAVL+ Tree
|
||||
|
||||
Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/sdk/core/multistore.md)
|
||||
Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/master/docs/clients/lite/specification.md)
|
||||
|
||||
## JSON
|
||||
|
||||
|
@ -73,11 +73,11 @@ parameters over each successive round.
|
||||
|(When +2/3 Precommits for block found) |
|
||||
v |
|
||||
+--------------------------------------------------------------------+
|
||||
| Commit |
|
||||
| |
|
||||
| * Set CommitTime = now; |
|
||||
| * Wait for block, then stage/save/commit block; |
|
||||
+--------------------------------------------------------------------+
|
||||
| Commit |
|
||||
| |
|
||||
| * Set CommitTime = now; |
|
||||
| * Wait for block, then stage/save/commit block; |
|
||||
+--------------------------------------------------------------------+
|
||||
```
|
||||
|
||||
# Background Gossip
|
||||
@ -120,7 +120,7 @@ A proposal is signed and published by the designated proposer at each
|
||||
round. The proposer is chosen by a deterministic and non-choking round
|
||||
robin selection algorithm that selects proposers in proportion to their
|
||||
voting power (see
|
||||
[implementation](https://github.com/tendermint/tendermint/blob/develop/types/validator_set.go)).
|
||||
[implementation](https://github.com/tendermint/tendermint/blob/master/types/validator_set.go)).
|
||||
|
||||
A proposal at `(H,R)` is composed of a block and an optional latest
|
||||
`PoLC-Round < R` which is included iff the proposer knows of one. This
|
||||
@ -131,13 +131,15 @@ liveness property.
|
||||
|
||||
### Propose Step (height:H,round:R)
|
||||
|
||||
Upon entering `Propose`: - The designated proposer proposes a block at
|
||||
`(H,R)`.
|
||||
Upon entering `Propose`:
|
||||
- The designated proposer proposes a block at `(H,R)`.
|
||||
|
||||
The `Propose` step ends: - After `timeoutProposeR` after entering
|
||||
`Propose`. --> goto `Prevote(H,R)` - After receiving proposal block
|
||||
and all prevotes at `PoLC-Round`. --> goto `Prevote(H,R)` - After
|
||||
[common exit conditions](#common-exit-conditions)
|
||||
The `Propose` step ends:
|
||||
- After `timeoutProposeR` after entering `Propose`. --> goto
|
||||
`Prevote(H,R)`
|
||||
- After receiving proposal block and all prevotes at `PoLC-Round`. -->
|
||||
goto `Prevote(H,R)`
|
||||
- After [common exit conditions](#common-exit-conditions)
|
||||
|
||||
### Prevote Step (height:H,round:R)
|
||||
|
||||
@ -152,10 +154,12 @@ Upon entering `Prevote`, each validator broadcasts its prevote vote.
|
||||
- Else, if the proposal is invalid or wasn't received on time, it
|
||||
prevotes `<nil>`.
|
||||
|
||||
The `Prevote` step ends: - After +2/3 prevotes for a particular block or
|
||||
`<nil>`. -->; goto `Precommit(H,R)` - After `timeoutPrevote` after
|
||||
receiving any +2/3 prevotes. --> goto `Precommit(H,R)` - After
|
||||
[common exit conditions](#common-exit-conditions)
|
||||
The `Prevote` step ends:
|
||||
- After +2/3 prevotes for a particular block or `<nil>`. -->; goto
|
||||
`Precommit(H,R)`
|
||||
- After `timeoutPrevote` after receiving any +2/3 prevotes. --> goto
|
||||
`Precommit(H,R)`
|
||||
- After [common exit conditions](#common-exit-conditions)
|
||||
|
||||
### Precommit Step (height:H,round:R)
|
||||
|
||||
@ -163,17 +167,19 @@ Upon entering `Precommit`, each validator broadcasts its precommit vote.
|
||||
|
||||
- If the validator has a PoLC at `(H,R)` for a particular block `B`, it
|
||||
(re)locks (or changes lock to) and precommits `B` and sets
|
||||
`LastLockRound = R`. - Else, if the validator has a PoLC at `(H,R)` for
|
||||
`<nil>`, it unlocks and precommits `<nil>`. - Else, it keeps the lock
|
||||
unchanged and precommits `<nil>`.
|
||||
`LastLockRound = R`.
|
||||
- Else, if the validator has a PoLC at `(H,R)` for `<nil>`, it unlocks
|
||||
and precommits `<nil>`.
|
||||
- Else, it keeps the lock unchanged and precommits `<nil>`.
|
||||
|
||||
A precommit for `<nil>` means "I didn’t see a PoLC for this round, but I
|
||||
did get +2/3 prevotes and waited a bit".
|
||||
|
||||
The Precommit step ends: - After +2/3 precommits for `<nil>`. -->
|
||||
goto `Propose(H,R+1)` - After `timeoutPrecommit` after receiving any
|
||||
+2/3 precommits. --> goto `Propose(H,R+1)` - After [common exit
|
||||
conditions](#common-exit-conditions)
|
||||
The Precommit step ends:
|
||||
- After +2/3 precommits for `<nil>`. --> goto `Propose(H,R+1)`
|
||||
- After `timeoutPrecommit` after receiving any +2/3 precommits. --> goto
|
||||
`Propose(H,R+1)`
|
||||
- After [common exit conditions](#common-exit-conditions)
|
||||
|
||||
### Common exit conditions
|
||||
|
||||
|
@ -7,7 +7,7 @@ See [this issue](https://github.com/tendermint/tendermint/issues/1503)
|
||||
Mempool maintains a cache of the last 10000 transactions to prevent
|
||||
replaying old transactions (plus transactions coming from other
|
||||
validators, who are continually exchanging transactions). Read [Replay
|
||||
Protection](../../../../app-development.md#replay-protection)
|
||||
Protection](../../../app-dev/app-development.md#replay-protection)
|
||||
for details.
|
||||
|
||||
Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
|
||||
|
@ -28,5 +28,5 @@ WAL. Then it will go to precommit, and that time it will work because the
|
||||
private validator contains the `LastSignBytes` and then we’ll replay the
|
||||
precommit from the WAL.
|
||||
|
||||
Make sure to read about [WAL corruption](../../../tendermint-core/running-in-production.md#wal-corruption)
|
||||
Make sure to read about [WAL corruption](../../tendermint-core/running-in-production.md#wal-corruption)
|
||||
and recovery strategies.
|
||||
|
@ -315,8 +315,7 @@ namespace = "tendermint"
|
||||
|
||||
If `create_empty_blocks` is set to `true` in your config, blocks will be
|
||||
created ~ every second (with default consensus parameters). You can regulate
|
||||
the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit
|
||||
= "10s"` should result in ~ 10 second blocks.
|
||||
the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks.
|
||||
|
||||
**create_empty_blocks = false**
|
||||
|
||||
@ -342,7 +341,7 @@ Tendermint will only create blocks if there are transactions, or after waiting
|
||||
## Consensus timeouts explained
|
||||
|
||||
There's a variety of information about timeouts in [Running in
|
||||
production](./running-in-production.html)
|
||||
production](./running-in-production.md)
|
||||
|
||||
You can also find more detailed technical explanation in the spec: [The latest
|
||||
gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
|
||||
|
@ -115,7 +115,7 @@ little overview what they do.
|
||||
- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
|
||||
client with respect to the application and maintains 3 connections:
|
||||
mempool, consensus and query. The code used by Tendermint Core can
|
||||
be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client).
|
||||
be found [here](https://github.com/tendermint/tendermint/tree/master/abci/client).
|
||||
- `blockchain` Provides storage, pool (a group of peers), and reactor
|
||||
for both storing and exchanging blocks between peers.
|
||||
- `consensus` The heart of Tendermint core, which is the
|
||||
|
@ -4,4 +4,4 @@ The RPC documentation is hosted here:
|
||||
|
||||
- [https://tendermint.com/rpc/](https://tendermint.com/rpc/)
|
||||
|
||||
To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core).
|
||||
To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/master/rpc/core).
|
||||
|
@ -20,7 +20,7 @@ Initialize the root directory by running:
|
||||
tendermint init
|
||||
```
|
||||
|
||||
This will create a new private key (`priv_validator.json`), and a
|
||||
This will create a new private key (`priv_validator_key.json`), and a
|
||||
genesis file (`genesis.json`) containing the associated public key, in
|
||||
`$TMHOME/config`. This is all that's necessary to run a local testnet
|
||||
with one validator.
|
||||
@ -43,6 +43,11 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g
|
||||
- `chain_id`: ID of the blockchain. This must be unique for
|
||||
every blockchain. If your testnet blockchains do not have unique
|
||||
chain IDs, you will have a bad time. The ChainID must be less than 50 symbols.
|
||||
- `consensus_params`
|
||||
- `block`
|
||||
- `time_iota_ms`: Minimum time increment between consecutive blocks (in
|
||||
milliseconds). If the block header timestamp is ahead of the system clock,
|
||||
decrease this value.
|
||||
- `validators`: List of initial validators. Note this may be overridden entirely by the
|
||||
application, and may be left empty to make explicit that the
|
||||
application will initialize the validator set with ResponseInitChain.
|
||||
@ -63,9 +68,10 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g
|
||||
"genesis_time": "2018-11-13T18:11:50.277637Z",
|
||||
"chain_id": "test-chain-s4ui7D",
|
||||
"consensus_params": {
|
||||
"block_size": {
|
||||
"block": {
|
||||
"max_bytes": "22020096",
|
||||
"max_gas": "-1"
|
||||
"max_gas": "-1",
|
||||
"time_iota_ms": "1000"
|
||||
},
|
||||
"evidence": {
|
||||
"max_age": "100000"
|
||||
@ -308,7 +314,7 @@ write-ahead-log](../tendermint-core/running-in-production.md#mempool-wal)
|
||||
## Tendermint Networks
|
||||
|
||||
When `tendermint init` is run, both a `genesis.json` and
|
||||
`priv_validator.json` are created in `~/.tendermint/config`. The
|
||||
`priv_validator_key.json` are created in `~/.tendermint/config`. The
|
||||
`genesis.json` might look like:
|
||||
|
||||
```
|
||||
@ -329,7 +335,7 @@ When `tendermint init` is run, both a `genesis.json` and
|
||||
}
|
||||
```
|
||||
|
||||
And the `priv_validator.json`:
|
||||
And the `priv_validator_key.json`:
|
||||
|
||||
```
|
||||
{
|
||||
@ -348,20 +354,20 @@ And the `priv_validator.json`:
|
||||
}
|
||||
```
|
||||
|
||||
The `priv_validator.json` actually contains a private key, and should
|
||||
The `priv_validator_key.json` actually contains a private key, and should
|
||||
thus be kept absolutely secret; for now we work with the plain text.
|
||||
Note the `last_` fields, which are used to prevent us from signing
|
||||
conflicting messages.
|
||||
|
||||
Note also that the `pub_key` (the public key) in the
|
||||
`priv_validator.json` is also present in the `genesis.json`.
|
||||
`priv_validator_key.json` is also present in the `genesis.json`.
|
||||
|
||||
The genesis file contains the list of public keys which may participate
|
||||
in the consensus, and their corresponding voting power. Greater than 2/3
|
||||
of the voting power must be active (i.e. the corresponding private keys
|
||||
must be producing signatures) for the consensus to make progress. In our
|
||||
case, the genesis file contains the public key of our
|
||||
`priv_validator.json`, so a Tendermint node started with the default
|
||||
`priv_validator_key.json`, so a Tendermint node started with the default
|
||||
root directory will be able to make progress. Voting power uses an int64
|
||||
but must be positive, thus the range is: 0 through 9223372036854775807.
|
||||
Because of how the current proposer selection algorithm works, we do not
|
||||
@ -447,16 +453,16 @@ not connected to the other peer.
|
||||
|
||||
The easiest way to add new validators is to do it in the `genesis.json`,
|
||||
before starting the network. For instance, we could make a new
|
||||
`priv_validator.json`, and copy it's `pub_key` into the above genesis.
|
||||
`priv_validator_key.json`, and copy it's `pub_key` into the above genesis.
|
||||
|
||||
We can generate a new `priv_validator.json` with the command:
|
||||
We can generate a new `priv_validator_key.json` with the command:
|
||||
|
||||
```
|
||||
tendermint gen_validator
|
||||
```
|
||||
|
||||
Now we can update our genesis file. For instance, if the new
|
||||
`priv_validator.json` looks like:
|
||||
`priv_validator_key.json` looks like:
|
||||
|
||||
```
|
||||
{
|
||||
@ -504,7 +510,7 @@ then the new `genesis.json` will be:
|
||||
```
|
||||
|
||||
Update the `genesis.json` in `~/.tendermint/config`. Copy the genesis
|
||||
file and the new `priv_validator.json` to the `~/.tendermint/config` on
|
||||
file and the new `priv_validator_key.json` to the `~/.tendermint/config` on
|
||||
a new machine.
|
||||
|
||||
Now run `tendermint node` on both machines, and use either
|
||||
|
@ -5,8 +5,8 @@ import (
|
||||
"sync"
|
||||
|
||||
clist "github.com/tendermint/tendermint/libs/clist"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
@ -7,10 +7,10 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@ -11,10 +11,10 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
// evidenceLogger is a TestingLogger which uses a different
|
||||
|
@ -3,8 +3,8 @@ package evidence
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
//-------------------------------------------
|
||||
|
20
go.mod
20
go.mod
@ -3,31 +3,26 @@ module github.com/tendermint/tendermint
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/VividCortex/gohistogram v1.0.0 // indirect
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
|
||||
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d
|
||||
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a
|
||||
github.com/etcd-io/bbolt v1.3.2
|
||||
github.com/fortytw2/leaktest v1.2.0
|
||||
github.com/go-kit/kit v0.6.0
|
||||
github.com/go-logfmt/logfmt v0.3.0
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/protobuf v1.3.0
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/google/gofuzz v1.0.0 // indirect
|
||||
github.com/gorilla/websocket v1.2.0
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jmhodges/levigo v1.0.0
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect
|
||||
github.com/magiconair/properties v1.8.0
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2 // indirect
|
||||
github.com/pelletier/go-toml v1.2.0 // indirect
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prometheus/client_golang v0.9.1
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
|
||||
github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 // indirect
|
||||
@ -40,12 +35,11 @@ require (
|
||||
github.com/spf13/jwalterweatherman v1.0.0 // indirect
|
||||
github.com/spf13/pflag v1.0.3 // indirect
|
||||
github.com/spf13/viper v1.0.0
|
||||
github.com/stretchr/testify v1.2.2
|
||||
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/tendermint/go-amino v0.14.1
|
||||
go.etcd.io/bbolt v1.3.3 // indirect
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
|
||||
github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 // indirect
|
||||
google.golang.org/grpc v1.13.0
|
||||
google.golang.org/grpc v1.22.0
|
||||
)
|
||||
|
45
go.sum
45
go.sum
@ -1,3 +1,4 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
|
||||
@ -15,11 +16,13 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0=
|
||||
github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
|
||||
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
@ -34,11 +37,14 @@ github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
|
||||
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ=
|
||||
@ -71,8 +77,9 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
|
||||
@ -99,32 +106,49 @@ github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I=
|
||||
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e h1:91EeXI4y4ShkyzkMqZ7QP/ZTIqwXp3RuDu5WFzxcFAs=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
|
||||
github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk=
|
||||
github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso=
|
||||
github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb h1:t/HdvqJc9e1iJDl+hf8wQKfOo40aen+Rkqh4AwEaNsI=
|
||||
github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb/go.mod h1:SLI3Mc+gRrorRsAXJArnHz4xmAdJT8O7Ns0NL4HslXE=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU=
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 h1:67iHsV9djwGdZpdZNbLuQj6FOzCaZe3w+vhLjn5AcFA=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc=
|
||||
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
|
||||
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
@ -133,3 +157,4 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
@ -1,21 +0,0 @@
|
||||
machine:
|
||||
environment:
|
||||
GOPATH: "${HOME}/.go_workspace"
|
||||
PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME"
|
||||
PROJECT_PATH: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
|
||||
hosts:
|
||||
localhost: 127.0.0.1
|
||||
|
||||
dependencies:
|
||||
override:
|
||||
- mkdir -p "$PROJECT_PARENT_PATH"
|
||||
- ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH"
|
||||
post:
|
||||
- go version
|
||||
|
||||
test:
|
||||
override:
|
||||
- cd $PROJECT_PATH && make get_tools && bash ./test.sh
|
||||
post:
|
||||
- cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt
|
||||
- cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}"
|
@ -1,43 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// TimeLayout helps to parse a date string of the format YYYY-MM-DD
|
||||
// Intended to be used with the following function:
|
||||
// time.Parse(TimeLayout, date)
|
||||
var TimeLayout = "2006-01-02" //this represents YYYY-MM-DD
|
||||
|
||||
// ParseDateRange parses a date range string of the format start:end
|
||||
// where the start and end date are of the format YYYY-MM-DD.
|
||||
// The parsed dates are time.Time and will return the zero time for
|
||||
// unbounded dates, ex:
|
||||
// unbounded start: :2000-12-31
|
||||
// unbounded end: 2000-12-31:
|
||||
func ParseDateRange(dateRange string) (startDate, endDate time.Time, err error) {
|
||||
dates := strings.Split(dateRange, ":")
|
||||
if len(dates) != 2 {
|
||||
err = errors.New("bad date range, must be in format date:date")
|
||||
return
|
||||
}
|
||||
parseDate := func(date string) (out time.Time, err error) {
|
||||
if len(date) == 0 {
|
||||
return
|
||||
}
|
||||
out, err = time.Parse(TimeLayout, date)
|
||||
return
|
||||
}
|
||||
startDate, err = parseDate(dates[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
endDate, err = parseDate(dates[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
@ -1,46 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
date = time.Date(2015, time.Month(12), 31, 0, 0, 0, 0, time.UTC)
|
||||
date2 = time.Date(2016, time.Month(12), 31, 0, 0, 0, 0, time.UTC)
|
||||
zero time.Time
|
||||
)
|
||||
|
||||
func TestParseDateRange(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
var testDates = []struct {
|
||||
dateStr string
|
||||
start time.Time
|
||||
end time.Time
|
||||
errNil bool
|
||||
}{
|
||||
{"2015-12-31:2016-12-31", date, date2, true},
|
||||
{"2015-12-31:", date, zero, true},
|
||||
{":2016-12-31", zero, date2, true},
|
||||
{"2016-12-31", zero, zero, false},
|
||||
{"2016-31-12:", zero, zero, false},
|
||||
{":2016-31-12", zero, zero, false},
|
||||
}
|
||||
|
||||
for _, test := range testDates {
|
||||
start, end, err := ParseDateRange(test.dateStr)
|
||||
if test.errNil {
|
||||
assert.Nil(err)
|
||||
testPtr := func(want, have time.Time) {
|
||||
assert.True(have.Equal(want))
|
||||
}
|
||||
testPtr(test.start, start)
|
||||
testPtr(test.end, end)
|
||||
} else {
|
||||
assert.NotNil(err)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,125 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/heap"
|
||||
)
|
||||
|
||||
/*
|
||||
Example usage:
|
||||
|
||||
```
|
||||
h := NewHeap()
|
||||
|
||||
h.Push("msg1", 1)
|
||||
h.Push("msg3", 3)
|
||||
h.Push("msg2", 2)
|
||||
|
||||
fmt.Println(h.Pop()) // msg1
|
||||
fmt.Println(h.Pop()) // msg2
|
||||
fmt.Println(h.Pop()) // msg3
|
||||
```
|
||||
*/
|
||||
type Heap struct {
|
||||
pq priorityQueue
|
||||
}
|
||||
|
||||
func NewHeap() *Heap {
|
||||
return &Heap{pq: make([]*pqItem, 0)}
|
||||
}
|
||||
|
||||
func (h *Heap) Len() int64 {
|
||||
return int64(len(h.pq))
|
||||
}
|
||||
|
||||
func (h *Heap) Push(value interface{}, priority int) {
|
||||
heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)})
|
||||
}
|
||||
|
||||
func (h *Heap) PushBytes(value interface{}, priority []byte) {
|
||||
heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)})
|
||||
}
|
||||
|
||||
func (h *Heap) PushComparable(value interface{}, priority Comparable) {
|
||||
heap.Push(&h.pq, &pqItem{value: value, priority: priority})
|
||||
}
|
||||
|
||||
func (h *Heap) Peek() interface{} {
|
||||
if len(h.pq) == 0 {
|
||||
return nil
|
||||
}
|
||||
return h.pq[0].value
|
||||
}
|
||||
|
||||
func (h *Heap) Update(value interface{}, priority Comparable) {
|
||||
h.pq.Update(h.pq[0], value, priority)
|
||||
}
|
||||
|
||||
func (h *Heap) Pop() interface{} {
|
||||
item := heap.Pop(&h.pq).(*pqItem)
|
||||
return item.value
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// From: http://golang.org/pkg/container/heap/#example__priorityQueue
|
||||
|
||||
type pqItem struct {
|
||||
value interface{}
|
||||
priority Comparable
|
||||
index int
|
||||
}
|
||||
|
||||
type priorityQueue []*pqItem
|
||||
|
||||
func (pq priorityQueue) Len() int { return len(pq) }
|
||||
|
||||
func (pq priorityQueue) Less(i, j int) bool {
|
||||
return pq[i].priority.Less(pq[j].priority)
|
||||
}
|
||||
|
||||
func (pq priorityQueue) Swap(i, j int) {
|
||||
pq[i], pq[j] = pq[j], pq[i]
|
||||
pq[i].index = i
|
||||
pq[j].index = j
|
||||
}
|
||||
|
||||
func (pq *priorityQueue) Push(x interface{}) {
|
||||
n := len(*pq)
|
||||
item := x.(*pqItem)
|
||||
item.index = n
|
||||
*pq = append(*pq, item)
|
||||
}
|
||||
|
||||
func (pq *priorityQueue) Pop() interface{} {
|
||||
old := *pq
|
||||
n := len(old)
|
||||
item := old[n-1]
|
||||
item.index = -1 // for safety
|
||||
*pq = old[0 : n-1]
|
||||
return item
|
||||
}
|
||||
|
||||
func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) {
|
||||
item.value = value
|
||||
item.priority = priority
|
||||
heap.Fix(pq, item.index)
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
// Comparable
|
||||
|
||||
type Comparable interface {
|
||||
Less(o interface{}) bool
|
||||
}
|
||||
|
||||
type cmpInt int
|
||||
|
||||
func (i cmpInt) Less(o interface{}) bool {
|
||||
return int(i) < int(o.(cmpInt))
|
||||
}
|
||||
|
||||
type cmpBytes []byte
|
||||
|
||||
func (bz cmpBytes) Less(o interface{}) bool {
|
||||
return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0
|
||||
}
|
@ -1,74 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
type PrefixedReader struct {
|
||||
Prefix []byte
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader {
|
||||
return &PrefixedReader{prefix, reader}
|
||||
}
|
||||
|
||||
func (pr *PrefixedReader) Read(p []byte) (n int, err error) {
|
||||
if len(pr.Prefix) > 0 {
|
||||
read := copy(p, pr.Prefix)
|
||||
pr.Prefix = pr.Prefix[read:]
|
||||
return read, nil
|
||||
}
|
||||
return pr.reader.Read(p)
|
||||
}
|
||||
|
||||
// NOTE: Not goroutine safe
|
||||
type BufferCloser struct {
|
||||
bytes.Buffer
|
||||
Closed bool
|
||||
}
|
||||
|
||||
func NewBufferCloser(buf []byte) *BufferCloser {
|
||||
return &BufferCloser{
|
||||
*bytes.NewBuffer(buf),
|
||||
false,
|
||||
}
|
||||
}
|
||||
|
||||
func (bc *BufferCloser) Close() error {
|
||||
if bc.Closed {
|
||||
return errors.New("BufferCloser already closed")
|
||||
}
|
||||
bc.Closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bc *BufferCloser) Write(p []byte) (n int, err error) {
|
||||
if bc.Closed {
|
||||
return 0, errors.New("Cannot write to closed BufferCloser")
|
||||
}
|
||||
return bc.Buffer.Write(p)
|
||||
}
|
||||
|
||||
func (bc *BufferCloser) WriteByte(c byte) error {
|
||||
if bc.Closed {
|
||||
return errors.New("Cannot write to closed BufferCloser")
|
||||
}
|
||||
return bc.Buffer.WriteByte(c)
|
||||
}
|
||||
|
||||
func (bc *BufferCloser) WriteRune(r rune) (n int, err error) {
|
||||
if bc.Closed {
|
||||
return 0, errors.New("Cannot write to closed BufferCloser")
|
||||
}
|
||||
return bc.Buffer.WriteRune(r)
|
||||
}
|
||||
|
||||
func (bc *BufferCloser) WriteString(s string) (n int, err error) {
|
||||
if bc.Closed {
|
||||
return 0, errors.New("Cannot write to closed BufferCloser")
|
||||
}
|
||||
return bc.Buffer.WriteString(s)
|
||||
}
|
@ -1,39 +1,13 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var gopath string
|
||||
|
||||
// GoPath returns GOPATH env variable value. If it is not set, this function
|
||||
// will try to call `go env GOPATH` subcommand.
|
||||
func GoPath() string {
|
||||
if gopath != "" {
|
||||
return gopath
|
||||
}
|
||||
|
||||
path := os.Getenv("GOPATH")
|
||||
if len(path) == 0 {
|
||||
goCmd := exec.Command("go", "env", "GOPATH")
|
||||
out, err := goCmd.Output()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to determine gopath: %v", err))
|
||||
}
|
||||
path = string(out)
|
||||
}
|
||||
gopath = path
|
||||
return path
|
||||
}
|
||||
|
||||
type logger interface {
|
||||
Info(msg string, keyvals ...interface{})
|
||||
}
|
||||
@ -78,25 +52,6 @@ func EnsureDir(dir string, mode os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func IsDirEmpty(name string) (bool, error) {
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return true, err
|
||||
}
|
||||
// Otherwise perhaps a permission
|
||||
// error or some other error.
|
||||
return false, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = f.Readdirnames(1) // Or f.Readdir(1)
|
||||
if err == io.EOF {
|
||||
return true, nil
|
||||
}
|
||||
return false, err // Either not empty or error, suits both cases
|
||||
}
|
||||
|
||||
func FileExists(filePath string) bool {
|
||||
_, err := os.Stat(filePath)
|
||||
return !os.IsNotExist(err)
|
||||
@ -125,19 +80,3 @@ func MustWriteFile(filePath string, contents []byte, mode os.FileMode) {
|
||||
Exit(fmt.Sprintf("MustWriteFile failed: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
func Prompt(prompt string, defaultValue string) (string, error) {
|
||||
fmt.Print(prompt)
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return defaultValue, err
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
return defaultValue, nil
|
||||
}
|
||||
return line, nil
|
||||
}
|
||||
|
@ -1,46 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestOSGoPath(t *testing.T) {
|
||||
// restore original gopath upon exit
|
||||
path := os.Getenv("GOPATH")
|
||||
defer func() {
|
||||
_ = os.Setenv("GOPATH", path)
|
||||
}()
|
||||
|
||||
err := os.Setenv("GOPATH", "~/testgopath")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
path = GoPath()
|
||||
if path != "~/testgopath" {
|
||||
t.Fatalf("should get GOPATH env var value, got %v", path)
|
||||
}
|
||||
os.Unsetenv("GOPATH")
|
||||
|
||||
path = GoPath()
|
||||
if path != "~/testgopath" {
|
||||
t.Fatalf("subsequent calls should return the same value, got %v", path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOSGoPathWithoutEnvVar(t *testing.T) {
|
||||
// restore original gopath upon exit
|
||||
path := os.Getenv("GOPATH")
|
||||
defer func() {
|
||||
_ = os.Setenv("GOPATH", path)
|
||||
}()
|
||||
|
||||
os.Unsetenv("GOPATH")
|
||||
// reset cache
|
||||
gopath = ""
|
||||
|
||||
path = GoPath()
|
||||
if path == "" || path == "~/testgopath" {
|
||||
t.Fatalf("should get nonempty result of calling go env GOPATH, got %v", path)
|
||||
}
|
||||
}
|
@ -1,223 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func cleanupDBDir(dir, name string) {
|
||||
err := os.RemoveAll(filepath.Join(dir, name) + ".db")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testBackendGetSetDelete(t *testing.T, backend DBBackendType) {
|
||||
// Default
|
||||
dirname, err := ioutil.TempDir("", fmt.Sprintf("test_backend_%s_", backend))
|
||||
require.Nil(t, err)
|
||||
db := NewDB("testdb", backend, dirname)
|
||||
defer cleanupDBDir(dirname, "testdb")
|
||||
|
||||
// A nonexistent key should return nil, even if the key is empty
|
||||
require.Nil(t, db.Get([]byte("")))
|
||||
|
||||
// A nonexistent key should return nil, even if the key is nil
|
||||
require.Nil(t, db.Get(nil))
|
||||
|
||||
// A nonexistent key should return nil.
|
||||
key := []byte("abc")
|
||||
require.Nil(t, db.Get(key))
|
||||
|
||||
// Set empty value.
|
||||
db.Set(key, []byte(""))
|
||||
require.NotNil(t, db.Get(key))
|
||||
require.Empty(t, db.Get(key))
|
||||
|
||||
// Set nil value.
|
||||
db.Set(key, nil)
|
||||
require.NotNil(t, db.Get(key))
|
||||
require.Empty(t, db.Get(key))
|
||||
|
||||
// Delete.
|
||||
db.Delete(key)
|
||||
require.Nil(t, db.Get(key))
|
||||
}
|
||||
|
||||
func TestBackendsGetSetDelete(t *testing.T) {
|
||||
for dbType := range backends {
|
||||
testBackendGetSetDelete(t, dbType)
|
||||
}
|
||||
}
|
||||
|
||||
func withDB(t *testing.T, creator dbCreator, fn func(DB)) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
db, err := creator(name, dir)
|
||||
require.Nil(t, err)
|
||||
defer cleanupDBDir(dir, name)
|
||||
fn(db)
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func TestBackendsNilKeys(t *testing.T) {
|
||||
|
||||
// Test all backends.
|
||||
for dbType, creator := range backends {
|
||||
withDB(t, creator, func(db DB) {
|
||||
t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) {
|
||||
|
||||
// Nil keys are treated as the empty key for most operations.
|
||||
expect := func(key, value []byte) {
|
||||
if len(key) == 0 { // nil or empty
|
||||
assert.Equal(t, db.Get(nil), db.Get([]byte("")))
|
||||
assert.Equal(t, db.Has(nil), db.Has([]byte("")))
|
||||
}
|
||||
assert.Equal(t, db.Get(key), value)
|
||||
assert.Equal(t, db.Has(key), value != nil)
|
||||
}
|
||||
|
||||
// Not set
|
||||
expect(nil, nil)
|
||||
|
||||
// Set nil value
|
||||
db.Set(nil, nil)
|
||||
expect(nil, []byte(""))
|
||||
|
||||
// Set empty value
|
||||
db.Set(nil, []byte(""))
|
||||
expect(nil, []byte(""))
|
||||
|
||||
// Set nil, Delete nil
|
||||
db.Set(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// Set nil, Delete empty
|
||||
db.Set(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete([]byte(""))
|
||||
expect(nil, nil)
|
||||
|
||||
// Set empty, Delete nil
|
||||
db.Set([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// Set empty, Delete empty
|
||||
db.Set([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete([]byte(""))
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync nil, DeleteSync nil
|
||||
db.SetSync(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync nil, DeleteSync empty
|
||||
db.SetSync(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync([]byte(""))
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync empty, DeleteSync nil
|
||||
db.SetSync([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync empty, DeleteSync empty
|
||||
db.SetSync([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync([]byte(""))
|
||||
expect(nil, nil)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoLevelDBBackend(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db := NewDB(name, GoLevelDBBackend, "")
|
||||
defer cleanupDBDir("", name)
|
||||
|
||||
_, ok := db.(*GoLevelDB)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestDBIterator(t *testing.T) {
|
||||
for dbType := range backends {
|
||||
t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) {
|
||||
testDBIterator(t, dbType)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testDBIterator(t *testing.T, backend DBBackendType) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
db := NewDB(name, backend, dir)
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
if i != 6 { // but skip 6.
|
||||
db.Set(int642Bytes(int64(i)), nil)
|
||||
}
|
||||
}
|
||||
|
||||
verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")
|
||||
verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")
|
||||
|
||||
verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64(nil), "reverse iterator from 10 (ex)")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(9)), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(8)), []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8")
|
||||
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(5)), []int64{4}, "reverse iterator from 5 (ex) to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(6)), []int64{5, 4}, "reverse iterator from 6 (ex) to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(7)), []int64{5, 4}, "reverse iterator from 7 (ex) to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "reverse iterator from 6 (ex) to 5")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "reverse iterator from 7 (ex) to 5")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "reverse iterator from 7 (ex) to 6")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(8), int642Bytes(9)), []int64{8}, "reverse iterator from 9 (ex) to 8")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64{3, 2}, "reverse iterator from 4 (ex) to 2")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "reverse iterator from 2 (ex) to 4")
|
||||
|
||||
}
|
||||
|
||||
func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) {
|
||||
var list []int64
|
||||
for itr.Valid() {
|
||||
list = append(list, bytes2Int64(itr.Key()))
|
||||
itr.Next()
|
||||
}
|
||||
assert.Equal(t, expected, list, msg)
|
||||
}
|
@ -1,349 +0,0 @@
|
||||
// +build boltdb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/etcd-io/bbolt"
|
||||
)
|
||||
|
||||
var bucket = []byte("tm")
|
||||
|
||||
func init() {
|
||||
registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) {
|
||||
return NewBoltDB(name, dir)
|
||||
}, false)
|
||||
}
|
||||
|
||||
// BoltDB is a wrapper around etcd's fork of bolt
|
||||
// (https://github.com/etcd-io/bbolt).
|
||||
//
|
||||
// NOTE: All operations (including Set, Delete) are synchronous by default. One
|
||||
// can globally turn it off by using NoSync config option (not recommended).
|
||||
//
|
||||
// A single bucket ([]byte("tm")) is used per a database instance. This could
|
||||
// lead to performance issues when/if there will be lots of keys.
|
||||
type BoltDB struct {
|
||||
db *bbolt.DB
|
||||
}
|
||||
|
||||
// NewBoltDB returns a BoltDB with default options.
|
||||
func NewBoltDB(name, dir string) (DB, error) {
|
||||
return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
|
||||
}
|
||||
|
||||
// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
|
||||
// supported because NewBoltDBWithOpts creates a global bucket.
|
||||
func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
|
||||
if opts.ReadOnly {
|
||||
return nil, errors.New("ReadOnly: true is not supported")
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
db, err := bbolt.Open(dbPath, os.ModePerm, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create a global bucket
|
||||
err = db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(bucket)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BoltDB{db: db}, nil
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Get(key []byte) (value []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
err := bdb.db.View(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
if v := b.Get(key); v != nil {
|
||||
value = append([]byte{}, v...)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Has(key []byte) bool {
|
||||
return bdb.Get(key) != nil
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Set(key, value []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
value = nonNilBytes(value)
|
||||
err := bdb.db.Update(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
return b.Put(key, value)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) SetSync(key, value []byte) {
|
||||
bdb.Set(key, value)
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Delete(key []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
err := bdb.db.Update(func(tx *bbolt.Tx) error {
|
||||
return tx.Bucket(bucket).Delete(key)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) DeleteSync(key []byte) {
|
||||
bdb.Delete(key)
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Close() {
|
||||
bdb.db.Close()
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Print() {
|
||||
stats := bdb.db.Stats()
|
||||
fmt.Printf("%v\n", stats)
|
||||
|
||||
err := bdb.db.View(func(tx *bbolt.Tx) error {
|
||||
tx.Bucket(bucket).ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("[%X]:\t[%X]\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Stats() map[string]string {
|
||||
stats := bdb.db.Stats()
|
||||
m := make(map[string]string)
|
||||
|
||||
// Freelist stats
|
||||
m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
|
||||
m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
|
||||
m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
|
||||
m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
|
||||
|
||||
// Transaction stats
|
||||
m["TxN"] = fmt.Sprintf("%v", stats.TxN)
|
||||
m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// boltDBBatch stores key values in sync.Map and dumps them to the underlying
|
||||
// DB upon Write call.
|
||||
type boltDBBatch struct {
|
||||
db *BoltDB
|
||||
ops []operation
|
||||
}
|
||||
|
||||
// NewBatch returns a new batch.
|
||||
func (bdb *BoltDB) NewBatch() Batch {
|
||||
return &boltDBBatch{
|
||||
ops: nil,
|
||||
db: bdb,
|
||||
}
|
||||
}
|
||||
|
||||
// It is safe to modify the contents of the argument after Set returns but not
|
||||
// before.
|
||||
func (bdb *boltDBBatch) Set(key, value []byte) {
|
||||
bdb.ops = append(bdb.ops, operation{opTypeSet, key, value})
|
||||
}
|
||||
|
||||
// It is safe to modify the contents of the argument after Delete returns but
|
||||
// not before.
|
||||
func (bdb *boltDBBatch) Delete(key []byte) {
|
||||
bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil})
|
||||
}
|
||||
|
||||
// NOTE: the operation is synchronous (see BoltDB for reasons)
|
||||
func (bdb *boltDBBatch) Write() {
|
||||
err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
for _, op := range bdb.ops {
|
||||
key := nonEmptyKey(nonNilBytes(op.key))
|
||||
switch op.opType {
|
||||
case opTypeSet:
|
||||
if putErr := b.Put(key, op.value); putErr != nil {
|
||||
return putErr
|
||||
}
|
||||
case opTypeDelete:
|
||||
if delErr := b.Delete(key); delErr != nil {
|
||||
return delErr
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *boltDBBatch) WriteSync() {
|
||||
bdb.Write()
|
||||
}
|
||||
|
||||
func (bdb *boltDBBatch) Close() {}
|
||||
|
||||
// WARNING: Any concurrent writes or reads will block until the iterator is
|
||||
// closed.
|
||||
func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
|
||||
tx, err := bdb.db.Begin(false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newBoltDBIterator(tx, start, end, false)
|
||||
}
|
||||
|
||||
// WARNING: Any concurrent writes or reads will block until the iterator is
|
||||
// closed.
|
||||
func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
|
||||
tx, err := bdb.db.Begin(false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newBoltDBIterator(tx, start, end, true)
|
||||
}
|
||||
|
||||
// boltDBIterator allows you to iterate on range of keys/values given some
|
||||
// start / end keys (nil & nil will result in doing full scan).
|
||||
type boltDBIterator struct {
|
||||
tx *bbolt.Tx
|
||||
|
||||
itr *bbolt.Cursor
|
||||
start []byte
|
||||
end []byte
|
||||
|
||||
currentKey []byte
|
||||
currentValue []byte
|
||||
|
||||
isInvalid bool
|
||||
isReverse bool
|
||||
}
|
||||
|
||||
func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
|
||||
itr := tx.Bucket(bucket).Cursor()
|
||||
|
||||
var ck, cv []byte
|
||||
if isReverse {
|
||||
if end == nil {
|
||||
ck, cv = itr.Last()
|
||||
} else {
|
||||
_, _ = itr.Seek(end) // after key
|
||||
ck, cv = itr.Prev() // return to end key
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
ck, cv = itr.First()
|
||||
} else {
|
||||
ck, cv = itr.Seek(start)
|
||||
}
|
||||
}
|
||||
|
||||
return &boltDBIterator{
|
||||
tx: tx,
|
||||
itr: itr,
|
||||
start: start,
|
||||
end: end,
|
||||
currentKey: ck,
|
||||
currentValue: cv,
|
||||
isReverse: isReverse,
|
||||
isInvalid: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Valid() bool {
|
||||
if itr.isInvalid {
|
||||
return false
|
||||
}
|
||||
|
||||
// iterated to the end of the cursor
|
||||
if len(itr.currentKey) == 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
|
||||
if itr.isReverse {
|
||||
if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Valid
|
||||
return true
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Next() {
|
||||
itr.assertIsValid()
|
||||
if itr.isReverse {
|
||||
itr.currentKey, itr.currentValue = itr.itr.Prev()
|
||||
} else {
|
||||
itr.currentKey, itr.currentValue = itr.itr.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Key() []byte {
|
||||
itr.assertIsValid()
|
||||
return append([]byte{}, itr.currentKey...)
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Value() []byte {
|
||||
itr.assertIsValid()
|
||||
var value []byte
|
||||
if itr.currentValue != nil {
|
||||
value = append([]byte{}, itr.currentValue...)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Close() {
|
||||
err := itr.tx.Rollback()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("Boltdb-iterator is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
// nonEmptyKey returns a []byte("nil") if key is empty.
|
||||
// WARNING: this may collude with "nil" user key!
|
||||
func nonEmptyKey(key []byte) []byte {
|
||||
if len(key) == 0 {
|
||||
return []byte("nil")
|
||||
}
|
||||
return key
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
// +build boltdb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func TestBoltDBNewBoltDB(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
db, err := NewBoltDB(name, dir)
|
||||
require.NoError(t, err)
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func BenchmarkBoltDBRandomReadsWrites(b *testing.B) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db, err := NewBoltDB(name, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
cleanupDBDir("", name)
|
||||
}()
|
||||
|
||||
benchmarkRandomReadsWrites(b, db)
|
||||
}
|
@ -1,325 +0,0 @@
|
||||
// +build cleveldb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/jmhodges/levigo"
|
||||
)
|
||||
|
||||
func init() {
|
||||
dbCreator := func(name string, dir string) (DB, error) {
|
||||
return NewCLevelDB(name, dir)
|
||||
}
|
||||
registerDBCreator(CLevelDBBackend, dbCreator, false)
|
||||
}
|
||||
|
||||
var _ DB = (*CLevelDB)(nil)
|
||||
|
||||
type CLevelDB struct {
|
||||
db *levigo.DB
|
||||
ro *levigo.ReadOptions
|
||||
wo *levigo.WriteOptions
|
||||
woSync *levigo.WriteOptions
|
||||
}
|
||||
|
||||
func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
|
||||
opts := levigo.NewOptions()
|
||||
opts.SetCache(levigo.NewLRUCache(1 << 30))
|
||||
opts.SetCreateIfMissing(true)
|
||||
db, err := levigo.Open(dbPath, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ro := levigo.NewReadOptions()
|
||||
wo := levigo.NewWriteOptions()
|
||||
woSync := levigo.NewWriteOptions()
|
||||
woSync.SetSync(true)
|
||||
database := &CLevelDB{
|
||||
db: db,
|
||||
ro: ro,
|
||||
wo: wo,
|
||||
woSync: woSync,
|
||||
}
|
||||
return database, nil
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Get(key []byte) []byte {
|
||||
key = nonNilBytes(key)
|
||||
res, err := db.db.Get(db.ro, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Has(key []byte) bool {
|
||||
return db.Get(key) != nil
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Set(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
err := db.db.Put(db.wo, key, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) SetSync(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
err := db.db.Put(db.woSync, key, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Delete(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
err := db.db.Delete(db.wo, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) DeleteSync(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
err := db.db.Delete(db.woSync, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (db *CLevelDB) DB() *levigo.DB {
|
||||
return db.db
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Close() {
|
||||
db.db.Close()
|
||||
db.ro.Close()
|
||||
db.wo.Close()
|
||||
db.woSync.Close()
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Print() {
|
||||
itr := db.Iterator(nil, nil)
|
||||
defer itr.Close()
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
key := itr.Key()
|
||||
value := itr.Value()
|
||||
fmt.Printf("[%X]:\t[%X]\n", key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Stats() map[string]string {
|
||||
keys := []string{
|
||||
"leveldb.aliveiters",
|
||||
"leveldb.alivesnaps",
|
||||
"leveldb.blockpool",
|
||||
"leveldb.cachedblock",
|
||||
"leveldb.num-files-at-level{n}",
|
||||
"leveldb.openedtables",
|
||||
"leveldb.sstables",
|
||||
"leveldb.stats",
|
||||
}
|
||||
|
||||
stats := make(map[string]string, len(keys))
|
||||
for _, key := range keys {
|
||||
str := db.db.PropertyValue(key)
|
||||
stats[key] = str
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Batch
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) NewBatch() Batch {
|
||||
batch := levigo.NewWriteBatch()
|
||||
return &cLevelDBBatch{db, batch}
|
||||
}
|
||||
|
||||
type cLevelDBBatch struct {
|
||||
db *CLevelDB
|
||||
batch *levigo.WriteBatch
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Set(key, value []byte) {
|
||||
mBatch.batch.Put(key, value)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Delete(key []byte) {
|
||||
mBatch.batch.Delete(key)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Write() {
|
||||
err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) WriteSync() {
|
||||
err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Close() {
|
||||
mBatch.batch.Close()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Iterator
|
||||
// NOTE This is almost identical to db/go_level_db.Iterator
|
||||
// Before creating a third version, refactor.
|
||||
|
||||
func (db *CLevelDB) Iterator(start, end []byte) Iterator {
|
||||
itr := db.db.NewIterator(db.ro)
|
||||
return newCLevelDBIterator(itr, start, end, false)
|
||||
}
|
||||
|
||||
func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
|
||||
itr := db.db.NewIterator(db.ro)
|
||||
return newCLevelDBIterator(itr, start, end, true)
|
||||
}
|
||||
|
||||
var _ Iterator = (*cLevelDBIterator)(nil)
|
||||
|
||||
type cLevelDBIterator struct {
|
||||
source *levigo.Iterator
|
||||
start, end []byte
|
||||
isReverse bool
|
||||
isInvalid bool
|
||||
}
|
||||
|
||||
func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
|
||||
if isReverse {
|
||||
if end == nil {
|
||||
source.SeekToLast()
|
||||
} else {
|
||||
source.Seek(end)
|
||||
if source.Valid() {
|
||||
eoakey := source.Key() // end or after key
|
||||
if bytes.Compare(end, eoakey) <= 0 {
|
||||
source.Prev()
|
||||
}
|
||||
} else {
|
||||
source.SeekToLast()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
source.SeekToFirst()
|
||||
} else {
|
||||
source.Seek(start)
|
||||
}
|
||||
}
|
||||
return &cLevelDBIterator{
|
||||
source: source,
|
||||
start: start,
|
||||
end: end,
|
||||
isReverse: isReverse,
|
||||
isInvalid: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Valid() bool {
|
||||
|
||||
// Once invalid, forever invalid.
|
||||
if itr.isInvalid {
|
||||
return false
|
||||
}
|
||||
|
||||
// Panic on DB error. No way to recover.
|
||||
itr.assertNoError()
|
||||
|
||||
// If source is invalid, invalid.
|
||||
if !itr.source.Valid() {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
|
||||
// If key is end or past it, invalid.
|
||||
var start = itr.start
|
||||
var end = itr.end
|
||||
var key = itr.source.Key()
|
||||
if itr.isReverse {
|
||||
if start != nil && bytes.Compare(key, start) < 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// It's valid.
|
||||
return true
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Key() []byte {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
return itr.source.Key()
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Value() []byte {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
return itr.source.Value()
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Next() {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
if itr.isReverse {
|
||||
itr.source.Prev()
|
||||
} else {
|
||||
itr.source.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Close() {
|
||||
itr.source.Close()
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) assertNoError() {
|
||||
if err := itr.source.GetError(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("cLevelDBIterator is invalid")
|
||||
}
|
||||
}
|
@ -1,110 +0,0 @@
|
||||
// +build cleveldb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func BenchmarkRandomReadsWrites2(b *testing.B) {
|
||||
b.StopTimer()
|
||||
|
||||
numItems := int64(1000000)
|
||||
internal := map[int64]int64{}
|
||||
for i := 0; i < int(numItems); i++ {
|
||||
internal[int64(i)] = int64(0)
|
||||
}
|
||||
db, err := NewCLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "")
|
||||
if err != nil {
|
||||
b.Fatal(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("ok, starting")
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Write something
|
||||
{
|
||||
idx := (int64(cmn.RandInt()) % numItems)
|
||||
internal[idx]++
|
||||
val := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := int642Bytes(int64(val))
|
||||
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
|
||||
db.Set(
|
||||
idxBytes,
|
||||
valBytes,
|
||||
)
|
||||
}
|
||||
// Read something
|
||||
{
|
||||
idx := (int64(cmn.RandInt()) % numItems)
|
||||
val := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := db.Get(idxBytes)
|
||||
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
|
||||
if val == 0 {
|
||||
if !bytes.Equal(valBytes, nil) {
|
||||
b.Errorf("Expected %v for %v, got %X",
|
||||
nil, idx, valBytes)
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if len(valBytes) != 8 {
|
||||
b.Errorf("Expected length 8 for %v, got %X",
|
||||
idx, valBytes)
|
||||
break
|
||||
}
|
||||
valGot := bytes2Int64(valBytes)
|
||||
if val != valGot {
|
||||
b.Errorf("Expected %v for %v, got %v",
|
||||
val, idx, valGot)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
db.Close()
|
||||
}
|
||||
|
||||
/*
|
||||
func int642Bytes(i int64) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, uint64(i))
|
||||
return buf
|
||||
}
|
||||
|
||||
func bytes2Int64(buf []byte) int64 {
|
||||
return int64(binary.BigEndian.Uint64(buf))
|
||||
}
|
||||
*/
|
||||
|
||||
func TestCLevelDBBackend(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
// Can't use "" (current directory) or "./" here because levigo.Open returns:
|
||||
// "Error initializing DB: IO error: test_XXX.db: Invalid argument"
|
||||
dir := os.TempDir()
|
||||
db := NewDB(name, CLevelDBBackend, dir)
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
_, ok := db.(*CLevelDB)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestCLevelDBStats(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
db := NewDB(name, CLevelDBBackend, dir)
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
assert.NotEmpty(t, db.Stats())
|
||||
}
|
@ -1,256 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
//----------------------------------------
|
||||
// Helper functions.
|
||||
|
||||
func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) {
|
||||
valueGot := db.Get(key)
|
||||
assert.Equal(t, valueWanted, valueGot)
|
||||
}
|
||||
|
||||
func checkValid(t *testing.T, itr Iterator, expected bool) {
|
||||
valid := itr.Valid()
|
||||
require.Equal(t, expected, valid)
|
||||
}
|
||||
|
||||
func checkNext(t *testing.T, itr Iterator, expected bool) {
|
||||
itr.Next()
|
||||
valid := itr.Valid()
|
||||
require.Equal(t, expected, valid)
|
||||
}
|
||||
|
||||
func checkNextPanics(t *testing.T, itr Iterator) {
|
||||
assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't")
|
||||
}
|
||||
|
||||
func checkDomain(t *testing.T, itr Iterator, start, end []byte) {
|
||||
ds, de := itr.Domain()
|
||||
assert.Equal(t, start, ds, "checkDomain domain start incorrect")
|
||||
assert.Equal(t, end, de, "checkDomain domain end incorrect")
|
||||
}
|
||||
|
||||
func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
|
||||
k, v := itr.Key(), itr.Value()
|
||||
assert.Exactly(t, key, k)
|
||||
assert.Exactly(t, value, v)
|
||||
}
|
||||
|
||||
func checkInvalid(t *testing.T, itr Iterator) {
|
||||
checkValid(t, itr, false)
|
||||
checkKeyPanics(t, itr)
|
||||
checkValuePanics(t, itr)
|
||||
checkNextPanics(t, itr)
|
||||
}
|
||||
|
||||
func checkKeyPanics(t *testing.T, itr Iterator) {
|
||||
assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
|
||||
}
|
||||
|
||||
func checkValuePanics(t *testing.T, itr Iterator) {
|
||||
assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
|
||||
}
|
||||
|
||||
func newTempDB(t *testing.T, backend DBBackendType) (db DB, dbDir string) {
|
||||
dirname, err := ioutil.TempDir("", "db_common_test")
|
||||
require.Nil(t, err)
|
||||
return NewDB("testdb", backend, dirname), dirname
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// mockDB
|
||||
|
||||
// NOTE: not actually goroutine safe.
|
||||
// If you want something goroutine safe, maybe you just want a MemDB.
|
||||
type mockDB struct {
|
||||
mtx sync.Mutex
|
||||
calls map[string]int
|
||||
}
|
||||
|
||||
func newMockDB() *mockDB {
|
||||
return &mockDB{
|
||||
calls: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Mutex() *sync.Mutex {
|
||||
return &(mdb.mtx)
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Get([]byte) []byte {
|
||||
mdb.calls["Get"]++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Has([]byte) bool {
|
||||
mdb.calls["Has"]++
|
||||
return false
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Set([]byte, []byte) {
|
||||
mdb.calls["Set"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) SetSync([]byte, []byte) {
|
||||
mdb.calls["SetSync"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) SetNoLock([]byte, []byte) {
|
||||
mdb.calls["SetNoLock"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) SetNoLockSync([]byte, []byte) {
|
||||
mdb.calls["SetNoLockSync"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Delete([]byte) {
|
||||
mdb.calls["Delete"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) DeleteSync([]byte) {
|
||||
mdb.calls["DeleteSync"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) DeleteNoLock([]byte) {
|
||||
mdb.calls["DeleteNoLock"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) DeleteNoLockSync([]byte) {
|
||||
mdb.calls["DeleteNoLockSync"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Iterator(start, end []byte) Iterator {
|
||||
mdb.calls["Iterator"]++
|
||||
return &mockIterator{}
|
||||
}
|
||||
|
||||
func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator {
|
||||
mdb.calls["ReverseIterator"]++
|
||||
return &mockIterator{}
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Close() {
|
||||
mdb.calls["Close"]++
|
||||
}
|
||||
|
||||
func (mdb *mockDB) NewBatch() Batch {
|
||||
mdb.calls["NewBatch"]++
|
||||
return &memBatch{db: mdb}
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Print() {
|
||||
mdb.calls["Print"]++
|
||||
fmt.Printf("mockDB{%v}", mdb.Stats())
|
||||
}
|
||||
|
||||
func (mdb *mockDB) Stats() map[string]string {
|
||||
mdb.calls["Stats"]++
|
||||
|
||||
res := make(map[string]string)
|
||||
for key, count := range mdb.calls {
|
||||
res[key] = fmt.Sprintf("%d", count)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
//----------------------------------------
// mockIterator

// mockIterator is an always-empty Iterator stub used by mockDB.
type mockIterator struct{}

func (mockIterator) Domain() (start []byte, end []byte) {
	return nil, nil
}

func (mockIterator) Valid() bool {
	return false
}

func (mockIterator) Next() {
}

func (mockIterator) Key() []byte {
	return nil
}

func (mockIterator) Value() []byte {
	return nil
}

func (mockIterator) Close() {
}
|
||||
|
||||
func benchmarkRandomReadsWrites(b *testing.B, db DB) {
|
||||
b.StopTimer()
|
||||
|
||||
// create dummy data
|
||||
const numItems = int64(1000000)
|
||||
internal := map[int64]int64{}
|
||||
for i := 0; i < int(numItems); i++ {
|
||||
internal[int64(i)] = int64(0)
|
||||
}
|
||||
|
||||
// fmt.Println("ok, starting")
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Write something
|
||||
{
|
||||
idx := int64(cmn.RandInt()) % numItems
|
||||
internal[idx]++
|
||||
val := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := int642Bytes(int64(val))
|
||||
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
|
||||
db.Set(idxBytes, valBytes)
|
||||
}
|
||||
|
||||
// Read something
|
||||
{
|
||||
idx := int64(cmn.RandInt()) % numItems
|
||||
valExp := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := db.Get(idxBytes)
|
||||
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
|
||||
if valExp == 0 {
|
||||
if !bytes.Equal(valBytes, nil) {
|
||||
b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if len(valBytes) != 8 {
|
||||
b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
|
||||
break
|
||||
}
|
||||
valGot := bytes2Int64(valBytes)
|
||||
if valExp != valGot {
|
||||
b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// int642Bytes encodes i as an 8-byte big-endian slice.
func int642Bytes(i int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(i))
	return b
}

// bytes2Int64 decodes an 8-byte big-endian slice back into an int64.
func bytes2Int64(buf []byte) int64 {
	return int64(binary.BigEndian.Uint64(buf))
}
|
@ -1,70 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DBBackendType names a key-value store implementation selectable via NewDB.
type DBBackendType string

// These are valid backend types.
const (
	// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
	// popular implementation)
	// - pure go
	// - stable
	GoLevelDBBackend DBBackendType = "goleveldb"

	// CLevelDBBackend represents cleveldb (uses levigo wrapper)
	// - fast
	// - requires gcc
	// - use cleveldb build tag (go build -tags cleveldb)
	CLevelDBBackend DBBackendType = "cleveldb"

	// MemDBBackend represents an in-memory key value store, mostly used
	// for testing.
	MemDBBackend DBBackendType = "memdb"

	// FSDBBackend represents a filesystem database
	// - EXPERIMENTAL
	// - slow
	FSDBBackend DBBackendType = "fsdb"

	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
	// github.com/etcd-io/bbolt)
	// - EXPERIMENTAL
	// - may be faster in some use-cases (random reads - indexer)
	// - use boltdb build tag (go build -tags boltdb)
	BoltDBBackend DBBackendType = "boltdb"
)
|
||||
|
||||
type dbCreator func(name string, dir string) (DB, error)
|
||||
|
||||
var backends = map[DBBackendType]dbCreator{}
|
||||
|
||||
func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) {
|
||||
_, ok := backends[backend]
|
||||
if !force && ok {
|
||||
return
|
||||
}
|
||||
backends[backend] = creator
|
||||
}
|
||||
|
||||
// NewDB creates a new database of type backend with the given name.
|
||||
// NOTE: function panics if:
|
||||
// - backend is unknown (not registered)
|
||||
// - creator function, provided during registration, returns error
|
||||
func NewDB(name string, backend DBBackendType, dir string) DB {
|
||||
dbCreator, ok := backends[backend]
|
||||
if !ok {
|
||||
keys := make([]string, len(backends))
|
||||
i := 0
|
||||
for k := range backends {
|
||||
keys[i] = string(k)
|
||||
i++
|
||||
}
|
||||
panic(fmt.Sprintf("Unknown db_backend %s, expected either %s", backend, strings.Join(keys, " or ")))
|
||||
}
|
||||
|
||||
db, err := dbCreator(name, dir)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error initializing DB: %v", err))
|
||||
}
|
||||
return db
|
||||
}
|
@ -1,194 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDBIteratorSingleKey(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
db.SetSync(bz("1"), bz("value_1"))
|
||||
itr := db.Iterator(nil, nil)
|
||||
|
||||
checkValid(t, itr, true)
|
||||
checkNext(t, itr, false)
|
||||
checkValid(t, itr, false)
|
||||
checkNextPanics(t, itr)
|
||||
|
||||
// Once invalid...
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBIteratorTwoKeys(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
db.SetSync(bz("1"), bz("value_1"))
|
||||
db.SetSync(bz("2"), bz("value_1"))
|
||||
|
||||
{ // Fail by calling Next too much
|
||||
itr := db.Iterator(nil, nil)
|
||||
checkValid(t, itr, true)
|
||||
|
||||
checkNext(t, itr, true)
|
||||
checkValid(t, itr, true)
|
||||
|
||||
checkNext(t, itr, false)
|
||||
checkValid(t, itr, false)
|
||||
|
||||
checkNextPanics(t, itr)
|
||||
|
||||
// Once invalid...
|
||||
checkInvalid(t, itr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBIteratorMany(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
keys := make([][]byte, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
keys[i] = []byte{byte(i)}
|
||||
}
|
||||
|
||||
value := []byte{5}
|
||||
for _, k := range keys {
|
||||
db.Set(k, value)
|
||||
}
|
||||
|
||||
itr := db.Iterator(nil, nil)
|
||||
defer itr.Close()
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
assert.Equal(t, db.Get(itr.Key()), itr.Value())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBIteratorEmpty(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
itr := db.Iterator(nil, nil)
|
||||
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBIteratorEmptyBeginAfter(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
itr := db.Iterator(bz("1"), nil)
|
||||
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBIteratorNonemptyBeginAfter(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
db.SetSync(bz("1"), bz("value_1"))
|
||||
itr := db.Iterator(bz("2"), nil)
|
||||
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDBBatchWrite(t *testing.T) {
|
||||
testCases := []struct {
|
||||
modify func(batch Batch)
|
||||
calls map[string]int
|
||||
}{
|
||||
0: {
|
||||
func(batch Batch) {
|
||||
batch.Set(bz("1"), bz("1"))
|
||||
batch.Set(bz("2"), bz("2"))
|
||||
batch.Delete(bz("3"))
|
||||
batch.Set(bz("4"), bz("4"))
|
||||
batch.Write()
|
||||
},
|
||||
map[string]int{
|
||||
"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
|
||||
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
|
||||
},
|
||||
},
|
||||
1: {
|
||||
func(batch Batch) {
|
||||
batch.Set(bz("1"), bz("1"))
|
||||
batch.Set(bz("2"), bz("2"))
|
||||
batch.Set(bz("4"), bz("4"))
|
||||
batch.Delete(bz("3"))
|
||||
batch.Write()
|
||||
},
|
||||
map[string]int{
|
||||
"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
|
||||
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
|
||||
},
|
||||
},
|
||||
2: {
|
||||
func(batch Batch) {
|
||||
batch.Set(bz("1"), bz("1"))
|
||||
batch.Set(bz("2"), bz("2"))
|
||||
batch.Delete(bz("3"))
|
||||
batch.Set(bz("4"), bz("4"))
|
||||
batch.WriteSync()
|
||||
},
|
||||
map[string]int{
|
||||
"Set": 0, "SetSync": 0, "SetNoLock": 2, "SetNoLockSync": 1,
|
||||
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
|
||||
},
|
||||
},
|
||||
3: {
|
||||
func(batch Batch) {
|
||||
batch.Set(bz("1"), bz("1"))
|
||||
batch.Set(bz("2"), bz("2"))
|
||||
batch.Set(bz("4"), bz("4"))
|
||||
batch.Delete(bz("3"))
|
||||
batch.WriteSync()
|
||||
},
|
||||
map[string]int{
|
||||
"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
|
||||
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 0, "DeleteNoLockSync": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
mdb := newMockDB()
|
||||
batch := mdb.NewBatch()
|
||||
|
||||
tc.modify(batch)
|
||||
|
||||
for call, exp := range tc.calls {
|
||||
got := mdb.calls[call]
|
||||
assert.Equal(t, exp, got, "#%v - key: %s", i, call)
|
||||
}
|
||||
}
|
||||
}
|
270
libs/db/fsdb.go
270
libs/db/fsdb.go
@ -1,270 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
const (
|
||||
keyPerm = os.FileMode(0600)
|
||||
dirPerm = os.FileMode(0700)
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
return NewFSDB(dbPath), nil
|
||||
}, false)
|
||||
}
|
||||
|
||||
var _ DB = (*FSDB)(nil)
|
||||
|
||||
// It's slow.
|
||||
type FSDB struct {
|
||||
mtx sync.Mutex
|
||||
dir string
|
||||
}
|
||||
|
||||
func NewFSDB(dir string) *FSDB {
|
||||
err := os.MkdirAll(dir, dirPerm)
|
||||
if err != nil {
|
||||
panic(errors.Wrap(err, "Creating FSDB dir "+dir))
|
||||
}
|
||||
database := &FSDB{
|
||||
dir: dir,
|
||||
}
|
||||
return database
|
||||
}
|
||||
|
||||
func (db *FSDB) Get(key []byte) []byte {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
key = escapeKey(key)
|
||||
|
||||
path := db.nameToPath(key)
|
||||
value, err := read(path)
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key))
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (db *FSDB) Has(key []byte) bool {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
key = escapeKey(key)
|
||||
|
||||
path := db.nameToPath(key)
|
||||
return cmn.FileExists(path)
|
||||
}
|
||||
|
||||
func (db *FSDB) Set(key []byte, value []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.SetNoLock(key, value)
|
||||
}
|
||||
|
||||
func (db *FSDB) SetSync(key []byte, value []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.SetNoLock(key, value)
|
||||
}
|
||||
|
||||
// NOTE: Implements atomicSetDeleter.
|
||||
func (db *FSDB) SetNoLock(key []byte, value []byte) {
|
||||
key = escapeKey(key)
|
||||
value = nonNilBytes(value)
|
||||
path := db.nameToPath(key)
|
||||
err := write(path, value)
|
||||
if err != nil {
|
||||
panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key))
|
||||
}
|
||||
}
|
||||
|
||||
func (db *FSDB) Delete(key []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.DeleteNoLock(key)
|
||||
}
|
||||
|
||||
func (db *FSDB) DeleteSync(key []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.DeleteNoLock(key)
|
||||
}
|
||||
|
||||
// NOTE: Implements atomicSetDeleter.
|
||||
func (db *FSDB) DeleteNoLock(key []byte) {
|
||||
key = escapeKey(key)
|
||||
path := db.nameToPath(key)
|
||||
err := remove(path)
|
||||
if os.IsNotExist(err) {
|
||||
return
|
||||
} else if err != nil {
|
||||
panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key))
|
||||
}
|
||||
}
|
||||
|
||||
func (db *FSDB) Close() {
|
||||
// Nothing to do.
|
||||
}
|
||||
|
||||
func (db *FSDB) Print() {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
panic("FSDB.Print not yet implemented")
|
||||
}
|
||||
|
||||
func (db *FSDB) Stats() map[string]string {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
panic("FSDB.Stats not yet implemented")
|
||||
}
|
||||
|
||||
func (db *FSDB) NewBatch() Batch {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
// Not sure we would ever want to try...
|
||||
// It doesn't seem easy for general filesystems.
|
||||
panic("FSDB.NewBatch not yet implemented")
|
||||
}
|
||||
|
||||
func (db *FSDB) Mutex() *sync.Mutex {
|
||||
return &(db.mtx)
|
||||
}
|
||||
|
||||
func (db *FSDB) Iterator(start, end []byte) Iterator {
|
||||
return db.MakeIterator(start, end, false)
|
||||
}
|
||||
|
||||
func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
// We need a copy of all of the keys.
|
||||
// Not the best, but probably not a bottleneck depending.
|
||||
keys, err := list(db.dir, start, end)
|
||||
if err != nil {
|
||||
panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
|
||||
}
|
||||
if isReversed {
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(keys)))
|
||||
} else {
|
||||
sort.Strings(keys)
|
||||
}
|
||||
return newMemDBIterator(db, keys, start, end)
|
||||
}
|
||||
|
||||
func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
|
||||
return db.MakeIterator(start, end, true)
|
||||
}
|
||||
|
||||
func (db *FSDB) nameToPath(name []byte) string {
|
||||
n := url.PathEscape(string(name))
|
||||
return filepath.Join(db.dir, n)
|
||||
}
|
||||
|
||||
// read returns the full contents of the file at path.
// CONTRACT: returns os errors directly without wrapping.
func read(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return ioutil.ReadAll(f)
}
|
||||
|
||||
// Write some bytes from a file.
|
||||
// CONTRACT: returns os errors directly without wrapping.
|
||||
func write(path string, d []byte) error {
|
||||
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
// fInfo, err := f.Stat()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if fInfo.Mode() != keyPerm {
|
||||
// return tmerrors.NewErrPermissionsChanged(f.Name(), keyPerm, fInfo.Mode())
|
||||
// }
|
||||
_, err = f.Write(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = f.Sync()
|
||||
return err
|
||||
}
|
||||
|
||||
// remove deletes the file at path.
// CONTRACT: returns os errors directly without wrapping.
func remove(path string) error {
	return os.Remove(path)
}
|
||||
|
||||
// List keys in a directory, stripping of escape sequences and dir portions.
|
||||
// CONTRACT: returns os errors directly without wrapping.
|
||||
func list(dirPath string, start, end []byte) ([]string, error) {
|
||||
dir, err := os.Open(dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer dir.Close()
|
||||
|
||||
names, err := dir.Readdirnames(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for _, name := range names {
|
||||
n, err := url.PathUnescape(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to unescape %s while listing", name)
|
||||
}
|
||||
key := unescapeKey([]byte(n))
|
||||
if IsKeyInDomain(key, start, end) {
|
||||
keys = append(keys, string(key))
|
||||
}
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// escapeKey prepends "k_" so that empty or nil keys still map to valid,
// non-empty filenames (the file system doesn't allow empty filenames).
func escapeKey(key []byte) []byte {
	return append([]byte("k_"), key...)
}

// unescapeKey strips the "k_" prefix added by escapeKey, panicking on
// malformed input.
func unescapeKey(escKey []byte) []byte {
	// Length check first so the slice below is safe.
	if len(escKey) < 2 || string(escKey[:2]) != "k_" {
		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
	}
	return escKey[2:]
}
|
@ -1,333 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func init() {
|
||||
dbCreator := func(name string, dir string) (DB, error) {
|
||||
return NewGoLevelDB(name, dir)
|
||||
}
|
||||
registerDBCreator(GoLevelDBBackend, dbCreator, false)
|
||||
}
|
||||
|
||||
var _ DB = (*GoLevelDB)(nil)
|
||||
|
||||
type GoLevelDB struct {
|
||||
db *leveldb.DB
|
||||
}
|
||||
|
||||
func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
|
||||
return NewGoLevelDBWithOpts(name, dir, nil)
|
||||
}
|
||||
|
||||
func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) {
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
db, err := leveldb.OpenFile(dbPath, o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
database := &GoLevelDB{
|
||||
db: db,
|
||||
}
|
||||
return database, nil
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Get(key []byte) []byte {
|
||||
key = nonNilBytes(key)
|
||||
res, err := db.db.Get(key, nil)
|
||||
if err != nil {
|
||||
if err == errors.ErrNotFound {
|
||||
return nil
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Has(key []byte) bool {
|
||||
return db.Get(key) != nil
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Set(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
err := db.db.Put(key, value, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) SetSync(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Delete(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
err := db.db.Delete(key, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) DeleteSync(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (db *GoLevelDB) DB() *leveldb.DB {
|
||||
return db.db
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Close() {
|
||||
db.db.Close()
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Print() {
|
||||
str, _ := db.db.GetProperty("leveldb.stats")
|
||||
fmt.Printf("%v\n", str)
|
||||
|
||||
itr := db.db.NewIterator(nil, nil)
|
||||
for itr.Next() {
|
||||
key := itr.Key()
|
||||
value := itr.Value()
|
||||
fmt.Printf("[%X]:\t[%X]\n", key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Stats() map[string]string {
|
||||
keys := []string{
|
||||
"leveldb.num-files-at-level{n}",
|
||||
"leveldb.stats",
|
||||
"leveldb.sstables",
|
||||
"leveldb.blockpool",
|
||||
"leveldb.cachedblock",
|
||||
"leveldb.openedtables",
|
||||
"leveldb.alivesnaps",
|
||||
"leveldb.aliveiters",
|
||||
}
|
||||
|
||||
stats := make(map[string]string)
|
||||
for _, key := range keys {
|
||||
str, err := db.db.GetProperty(key)
|
||||
if err == nil {
|
||||
stats[key] = str
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Batch
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) NewBatch() Batch {
|
||||
batch := new(leveldb.Batch)
|
||||
return &goLevelDBBatch{db, batch}
|
||||
}
|
||||
|
||||
type goLevelDBBatch struct {
|
||||
db *GoLevelDB
|
||||
batch *leveldb.Batch
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *goLevelDBBatch) Set(key, value []byte) {
|
||||
mBatch.batch.Put(key, value)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *goLevelDBBatch) Delete(key []byte) {
|
||||
mBatch.batch.Delete(key)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *goLevelDBBatch) Write() {
|
||||
err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *goLevelDBBatch) WriteSync() {
|
||||
err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
// Close is no-op for goLevelDBBatch.
|
||||
func (mBatch *goLevelDBBatch) Close() {}
|
||||
|
||||
//----------------------------------------
|
||||
// Iterator
|
||||
// NOTE This is almost identical to db/c_level_db.Iterator
|
||||
// Before creating a third version, refactor.
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
|
||||
itr := db.db.NewIterator(nil, nil)
|
||||
return newGoLevelDBIterator(itr, start, end, false)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
|
||||
itr := db.db.NewIterator(nil, nil)
|
||||
return newGoLevelDBIterator(itr, start, end, true)
|
||||
}
|
||||
|
||||
type goLevelDBIterator struct {
|
||||
source iterator.Iterator
|
||||
start []byte
|
||||
end []byte
|
||||
isReverse bool
|
||||
isInvalid bool
|
||||
}
|
||||
|
||||
var _ Iterator = (*goLevelDBIterator)(nil)
|
||||
|
||||
func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
|
||||
if isReverse {
|
||||
if end == nil {
|
||||
source.Last()
|
||||
} else {
|
||||
valid := source.Seek(end)
|
||||
if valid {
|
||||
eoakey := source.Key() // end or after key
|
||||
if bytes.Compare(end, eoakey) <= 0 {
|
||||
source.Prev()
|
||||
}
|
||||
} else {
|
||||
source.Last()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
source.First()
|
||||
} else {
|
||||
source.Seek(start)
|
||||
}
|
||||
}
|
||||
return &goLevelDBIterator{
|
||||
source: source,
|
||||
start: start,
|
||||
end: end,
|
||||
isReverse: isReverse,
|
||||
isInvalid: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *goLevelDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *goLevelDBIterator) Valid() bool {
|
||||
|
||||
// Once invalid, forever invalid.
|
||||
if itr.isInvalid {
|
||||
return false
|
||||
}
|
||||
|
||||
// Panic on DB error. No way to recover.
|
||||
itr.assertNoError()
|
||||
|
||||
// If source is invalid, invalid.
|
||||
if !itr.source.Valid() {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
|
||||
// If key is end or past it, invalid.
|
||||
var start = itr.start
|
||||
var end = itr.end
|
||||
var key = itr.source.Key()
|
||||
|
||||
if itr.isReverse {
|
||||
if start != nil && bytes.Compare(key, start) < 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Valid
|
||||
return true
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *goLevelDBIterator) Key() []byte {
|
||||
// Key returns a copy of the current key.
|
||||
// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
return cp(itr.source.Key())
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *goLevelDBIterator) Value() []byte {
|
||||
// Value returns a copy of the current value.
|
||||
// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
return cp(itr.source.Value())
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *goLevelDBIterator) Next() {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
if itr.isReverse {
|
||||
itr.source.Prev()
|
||||
} else {
|
||||
itr.source.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *goLevelDBIterator) Close() {
|
||||
itr.source.Release()
|
||||
}
|
||||
|
||||
func (itr *goLevelDBIterator) assertNoError() {
|
||||
if err := itr.source.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (itr goLevelDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("goLevelDBIterator is invalid")
|
||||
}
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func TestGoLevelDBNewGoLevelDB(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
defer cleanupDBDir("", name)
|
||||
|
||||
// Test we can't open the db twice for writing
|
||||
wr1, err := NewGoLevelDB(name, "")
|
||||
require.Nil(t, err)
|
||||
_, err = NewGoLevelDB(name, "")
|
||||
require.NotNil(t, err)
|
||||
wr1.Close() // Close the db to release the lock
|
||||
|
||||
// Test we can open the db twice for reading only
|
||||
ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
|
||||
defer ro1.Close()
|
||||
require.Nil(t, err)
|
||||
ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
|
||||
defer ro2.Close()
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db, err := NewGoLevelDB(name, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
cleanupDBDir("", name)
|
||||
}()
|
||||
|
||||
benchmarkRandomReadsWrites(b, db)
|
||||
}
|
@ -1,74 +0,0 @@
|
||||
package db
|
||||
|
||||
import "sync"
|
||||
|
||||
// atomicSetDeleter is the slice of DB functionality memBatch relies on: an
// optional mutex plus unlocked (and fsync'd) set/delete primitives.
type atomicSetDeleter interface {
	Mutex() *sync.Mutex
	SetNoLock(key, value []byte)
	SetNoLockSync(key, value []byte)
	DeleteNoLock(key []byte)
	DeleteNoLockSync(key []byte)
}

// memBatch buffers operations and replays them atomically on Write.
type memBatch struct {
	db  atomicSetDeleter
	ops []operation
}

// opType discriminates the buffered operation kinds.
type opType int

const (
	opTypeSet    opType = 1
	opTypeDelete opType = 2
)

// operation is a single buffered set or delete (value is nil for deletes).
type operation struct {
	opType
	key   []byte
	value []byte
}
|
||||
|
||||
func (mBatch *memBatch) Set(key, value []byte) {
|
||||
mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
|
||||
}
|
||||
|
||||
func (mBatch *memBatch) Delete(key []byte) {
|
||||
mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
|
||||
}
|
||||
|
||||
func (mBatch *memBatch) Write() {
|
||||
mBatch.write(false)
|
||||
}
|
||||
|
||||
func (mBatch *memBatch) WriteSync() {
|
||||
mBatch.write(true)
|
||||
}
|
||||
|
||||
func (mBatch *memBatch) Close() {
|
||||
mBatch.ops = nil
|
||||
}
|
||||
|
||||
func (mBatch *memBatch) write(doSync bool) {
|
||||
if mtx := mBatch.db.Mutex(); mtx != nil {
|
||||
mtx.Lock()
|
||||
defer mtx.Unlock()
|
||||
}
|
||||
|
||||
for i, op := range mBatch.ops {
|
||||
if doSync && i == (len(mBatch.ops)-1) {
|
||||
switch op.opType {
|
||||
case opTypeSet:
|
||||
mBatch.db.SetNoLockSync(op.key, op.value)
|
||||
case opTypeDelete:
|
||||
mBatch.db.DeleteNoLockSync(op.key)
|
||||
}
|
||||
break // we're done.
|
||||
}
|
||||
switch op.opType {
|
||||
case opTypeSet:
|
||||
mBatch.db.SetNoLock(op.key, op.value)
|
||||
case opTypeDelete:
|
||||
mBatch.db.DeleteNoLock(op.key)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,255 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
|
||||
return NewMemDB(), nil
|
||||
}, false)
|
||||
}
|
||||
|
||||
var _ DB = (*MemDB)(nil)
|
||||
|
||||
type MemDB struct {
|
||||
mtx sync.Mutex
|
||||
db map[string][]byte
|
||||
}
|
||||
|
||||
func NewMemDB() *MemDB {
|
||||
database := &MemDB{
|
||||
db: make(map[string][]byte),
|
||||
}
|
||||
return database
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (db *MemDB) Mutex() *sync.Mutex {
|
||||
return &(db.mtx)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Get(key []byte) []byte {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
key = nonNilBytes(key)
|
||||
|
||||
value := db.db[string(key)]
|
||||
return value
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Has(key []byte) bool {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
key = nonNilBytes(key)
|
||||
|
||||
_, ok := db.db[string(key)]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Set(key []byte, value []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.SetNoLock(key, value)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) SetSync(key []byte, value []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.SetNoLock(key, value)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (db *MemDB) SetNoLock(key []byte, value []byte) {
|
||||
db.SetNoLockSync(key, value)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (db *MemDB) SetNoLockSync(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
|
||||
db.db[string(key)] = value
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Delete(key []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.DeleteNoLock(key)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) DeleteSync(key []byte) {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
db.DeleteNoLock(key)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (db *MemDB) DeleteNoLock(key []byte) {
|
||||
db.DeleteNoLockSync(key)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (db *MemDB) DeleteNoLockSync(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
|
||||
delete(db.db, string(key))
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Close() {
|
||||
// Close is a noop since for an in-memory
|
||||
// database, we don't have a destination
|
||||
// to flush contents to nor do we want
|
||||
// any data loss on invoking Close()
|
||||
// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Print() {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
for key, value := range db.db {
|
||||
fmt.Printf("[%X]:\t[%X]\n", []byte(key), value)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Stats() map[string]string {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
stats := make(map[string]string)
|
||||
stats["database.type"] = "memDB"
|
||||
stats["database.size"] = fmt.Sprintf("%d", len(db.db))
|
||||
return stats
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) NewBatch() Batch {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
return &memBatch{db, nil}
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Iterator
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) Iterator(start, end []byte) Iterator {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
keys := db.getSortedKeys(start, end, false)
|
||||
return newMemDBIterator(db, keys, start, end)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *MemDB) ReverseIterator(start, end []byte) Iterator {
|
||||
db.mtx.Lock()
|
||||
defer db.mtx.Unlock()
|
||||
|
||||
keys := db.getSortedKeys(start, end, true)
|
||||
return newMemDBIterator(db, keys, start, end)
|
||||
}
|
||||
|
||||
// We need a copy of all of the keys.
|
||||
// Not the best, but probably not a bottleneck depending.
|
||||
type memDBIterator struct {
|
||||
db DB
|
||||
cur int
|
||||
keys []string
|
||||
start []byte
|
||||
end []byte
|
||||
}
|
||||
|
||||
var _ Iterator = (*memDBIterator)(nil)
|
||||
|
||||
// Keys is expected to be in reverse order for reverse iterators.
|
||||
func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator {
|
||||
return &memDBIterator{
|
||||
db: db,
|
||||
cur: 0,
|
||||
keys: keys,
|
||||
start: start,
|
||||
end: end,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *memDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *memDBIterator) Valid() bool {
|
||||
return 0 <= itr.cur && itr.cur < len(itr.keys)
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *memDBIterator) Next() {
|
||||
itr.assertIsValid()
|
||||
itr.cur++
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *memDBIterator) Key() []byte {
|
||||
itr.assertIsValid()
|
||||
return []byte(itr.keys[itr.cur])
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *memDBIterator) Value() []byte {
|
||||
itr.assertIsValid()
|
||||
key := []byte(itr.keys[itr.cur])
|
||||
return itr.db.Get(key)
|
||||
}
|
||||
|
||||
// Implements Iterator.
|
||||
func (itr *memDBIterator) Close() {
|
||||
itr.keys = nil
|
||||
itr.db = nil
|
||||
}
|
||||
|
||||
func (itr *memDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("memDBIterator is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Misc.
|
||||
|
||||
func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string {
|
||||
keys := []string{}
|
||||
for key := range db.db {
|
||||
inDomain := IsKeyInDomain([]byte(key), start, end)
|
||||
if inDomain {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
sort.Strings(keys)
|
||||
if reverse {
|
||||
nkeys := len(keys)
|
||||
for i := 0; i < nkeys/2; i++ {
|
||||
temp := keys[i]
|
||||
keys[i] = keys[nkeys-i-1]
|
||||
keys[nkeys-i-1] = temp
|
||||
}
|
||||
}
|
||||
return keys
|
||||
}
|
@ -1,336 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// IteratePrefix is a convenience function for iterating over a key domain
|
||||
// restricted by prefix.
|
||||
func IteratePrefix(db DB, prefix []byte) Iterator {
|
||||
var start, end []byte
|
||||
if len(prefix) == 0 {
|
||||
start = nil
|
||||
end = nil
|
||||
} else {
|
||||
start = cp(prefix)
|
||||
end = cpIncr(prefix)
|
||||
}
|
||||
return db.Iterator(start, end)
|
||||
}
|
||||
|
||||
/*
|
||||
TODO: Make test, maybe rename.
|
||||
// Like IteratePrefix but the iterator strips the prefix from the keys.
|
||||
func IteratePrefixStripped(db DB, prefix []byte) Iterator {
|
||||
start, end := ...
|
||||
return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix))
|
||||
}
|
||||
*/
|
||||
|
||||
//----------------------------------------
|
||||
// prefixDB
|
||||
|
||||
type prefixDB struct {
|
||||
mtx sync.Mutex
|
||||
prefix []byte
|
||||
db DB
|
||||
}
|
||||
|
||||
// NewPrefixDB lets you namespace multiple DBs within a single DB.
|
||||
func NewPrefixDB(db DB, prefix []byte) *prefixDB {
|
||||
return &prefixDB{
|
||||
prefix: prefix,
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (pdb *prefixDB) Mutex() *sync.Mutex {
|
||||
return &(pdb.mtx)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Get(key []byte) []byte {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
pkey := pdb.prefixed(key)
|
||||
value := pdb.db.Get(pkey)
|
||||
return value
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Has(key []byte) bool {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
return pdb.db.Has(pdb.prefixed(key))
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Set(key []byte, value []byte) {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
pkey := pdb.prefixed(key)
|
||||
pdb.db.Set(pkey, value)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) SetSync(key []byte, value []byte) {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
pdb.db.SetSync(pdb.prefixed(key), value)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Delete(key []byte) {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
pdb.db.Delete(pdb.prefixed(key))
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) DeleteSync(key []byte) {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
pdb.db.DeleteSync(pdb.prefixed(key))
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Iterator(start, end []byte) Iterator {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
var pstart, pend []byte
|
||||
pstart = append(cp(pdb.prefix), start...)
|
||||
if end == nil {
|
||||
pend = cpIncr(pdb.prefix)
|
||||
} else {
|
||||
pend = append(cp(pdb.prefix), end...)
|
||||
}
|
||||
return newPrefixIterator(
|
||||
pdb.prefix,
|
||||
start,
|
||||
end,
|
||||
pdb.db.Iterator(
|
||||
pstart,
|
||||
pend,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
var pstart, pend []byte
|
||||
pstart = append(cp(pdb.prefix), start...)
|
||||
if end == nil {
|
||||
pend = cpIncr(pdb.prefix)
|
||||
} else {
|
||||
pend = append(cp(pdb.prefix), end...)
|
||||
}
|
||||
ritr := pdb.db.ReverseIterator(pstart, pend)
|
||||
return newPrefixIterator(
|
||||
pdb.prefix,
|
||||
start,
|
||||
end,
|
||||
ritr,
|
||||
)
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
// Panics if the underlying DB is not an
|
||||
// atomicSetDeleter.
|
||||
func (pdb *prefixDB) NewBatch() Batch {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
return newPrefixBatch(pdb.prefix, pdb.db.NewBatch())
|
||||
}
|
||||
|
||||
/* NOTE: Uncomment to use memBatch instead of prefixBatch
|
||||
// Implements atomicSetDeleter.
|
||||
func (pdb *prefixDB) SetNoLock(key []byte, value []byte) {
|
||||
pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) {
|
||||
pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value)
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (pdb *prefixDB) DeleteNoLock(key []byte) {
|
||||
pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key))
|
||||
}
|
||||
|
||||
// Implements atomicSetDeleter.
|
||||
func (pdb *prefixDB) DeleteNoLockSync(key []byte) {
|
||||
pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key))
|
||||
}
|
||||
*/
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Close() {
|
||||
pdb.mtx.Lock()
|
||||
defer pdb.mtx.Unlock()
|
||||
|
||||
pdb.db.Close()
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Print() {
|
||||
fmt.Printf("prefix: %X\n", pdb.prefix)
|
||||
|
||||
itr := pdb.Iterator(nil, nil)
|
||||
defer itr.Close()
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
key := itr.Key()
|
||||
value := itr.Value()
|
||||
fmt.Printf("[%X]:\t[%X]\n", key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (pdb *prefixDB) Stats() map[string]string {
|
||||
stats := make(map[string]string)
|
||||
stats["prefixdb.prefix.string"] = string(pdb.prefix)
|
||||
stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix)
|
||||
source := pdb.db.Stats()
|
||||
for key, value := range source {
|
||||
stats["prefixdb.source."+key] = value
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (pdb *prefixDB) prefixed(key []byte) []byte {
|
||||
return append(cp(pdb.prefix), key...)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// prefixBatch
|
||||
|
||||
type prefixBatch struct {
|
||||
prefix []byte
|
||||
source Batch
|
||||
}
|
||||
|
||||
func newPrefixBatch(prefix []byte, source Batch) prefixBatch {
|
||||
return prefixBatch{
|
||||
prefix: prefix,
|
||||
source: source,
|
||||
}
|
||||
}
|
||||
|
||||
func (pb prefixBatch) Set(key, value []byte) {
|
||||
pkey := append(cp(pb.prefix), key...)
|
||||
pb.source.Set(pkey, value)
|
||||
}
|
||||
|
||||
func (pb prefixBatch) Delete(key []byte) {
|
||||
pkey := append(cp(pb.prefix), key...)
|
||||
pb.source.Delete(pkey)
|
||||
}
|
||||
|
||||
func (pb prefixBatch) Write() {
|
||||
pb.source.Write()
|
||||
}
|
||||
|
||||
func (pb prefixBatch) WriteSync() {
|
||||
pb.source.WriteSync()
|
||||
}
|
||||
|
||||
func (pb prefixBatch) Close() {
|
||||
pb.source.Close()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// prefixIterator
|
||||
|
||||
var _ Iterator = (*prefixIterator)(nil)
|
||||
|
||||
// Strips prefix while iterating from Iterator.
|
||||
type prefixIterator struct {
|
||||
prefix []byte
|
||||
start []byte
|
||||
end []byte
|
||||
source Iterator
|
||||
valid bool
|
||||
}
|
||||
|
||||
func newPrefixIterator(prefix, start, end []byte, source Iterator) *prefixIterator {
|
||||
if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) {
|
||||
return &prefixIterator{
|
||||
prefix: prefix,
|
||||
start: start,
|
||||
end: end,
|
||||
source: source,
|
||||
valid: false,
|
||||
}
|
||||
} else {
|
||||
return &prefixIterator{
|
||||
prefix: prefix,
|
||||
start: start,
|
||||
end: end,
|
||||
source: source,
|
||||
valid: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *prefixIterator) Domain() (start []byte, end []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
func (itr *prefixIterator) Valid() bool {
|
||||
return itr.valid && itr.source.Valid()
|
||||
}
|
||||
|
||||
func (itr *prefixIterator) Next() {
|
||||
if !itr.valid {
|
||||
panic("prefixIterator invalid, cannot call Next()")
|
||||
}
|
||||
itr.source.Next()
|
||||
if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
|
||||
itr.valid = false
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *prefixIterator) Key() (key []byte) {
|
||||
if !itr.valid {
|
||||
panic("prefixIterator invalid, cannot call Key()")
|
||||
}
|
||||
return stripPrefix(itr.source.Key(), itr.prefix)
|
||||
}
|
||||
|
||||
func (itr *prefixIterator) Value() (value []byte) {
|
||||
if !itr.valid {
|
||||
panic("prefixIterator invalid, cannot call Value()")
|
||||
}
|
||||
return itr.source.Value()
|
||||
}
|
||||
|
||||
func (itr *prefixIterator) Close() {
|
||||
itr.source.Close()
|
||||
}
|
||||
|
||||
//----------------------------------------

// stripPrefix removes prefix from key, panicking if key does not actually
// begin with prefix (callers guarantee that it does).
func stripPrefix(key []byte, prefix []byte) (stripped []byte) {
	if len(key) < len(prefix) {
		panic("should not happen")
	}
	if !bytes.Equal(key[:len(prefix)], prefix) {
		panic("should not happen") // fixed typo: was "should not happne"
	}
	return key[len(prefix):]
}
|
@ -1,192 +0,0 @@
|
||||
package db
|
||||
|
||||
import "testing"
|
||||
|
||||
func mockDBWithStuff() DB {
|
||||
db := NewMemDB()
|
||||
// Under "key" prefix
|
||||
db.Set(bz("key"), bz("value"))
|
||||
db.Set(bz("key1"), bz("value1"))
|
||||
db.Set(bz("key2"), bz("value2"))
|
||||
db.Set(bz("key3"), bz("value3"))
|
||||
db.Set(bz("something"), bz("else"))
|
||||
db.Set(bz(""), bz(""))
|
||||
db.Set(bz("k"), bz("val"))
|
||||
db.Set(bz("ke"), bz("valu"))
|
||||
db.Set(bz("kee"), bz("valuu"))
|
||||
return db
|
||||
}
|
||||
|
||||
func TestPrefixDBSimple(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
checkValue(t, pdb, bz("key"), nil)
|
||||
checkValue(t, pdb, bz(""), bz("value"))
|
||||
checkValue(t, pdb, bz("key1"), nil)
|
||||
checkValue(t, pdb, bz("1"), bz("value1"))
|
||||
checkValue(t, pdb, bz("key2"), nil)
|
||||
checkValue(t, pdb, bz("2"), bz("value2"))
|
||||
checkValue(t, pdb, bz("key3"), nil)
|
||||
checkValue(t, pdb, bz("3"), bz("value3"))
|
||||
checkValue(t, pdb, bz("something"), nil)
|
||||
checkValue(t, pdb, bz("k"), nil)
|
||||
checkValue(t, pdb, bz("ke"), nil)
|
||||
checkValue(t, pdb, bz("kee"), nil)
|
||||
}
|
||||
|
||||
func TestPrefixDBIterator1(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.Iterator(nil, nil)
|
||||
checkDomain(t, itr, nil, nil)
|
||||
checkItem(t, itr, bz(""), bz("value"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("1"), bz("value1"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value2"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("3"), bz("value3"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBIterator2(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.Iterator(nil, bz(""))
|
||||
checkDomain(t, itr, nil, bz(""))
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBIterator3(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.Iterator(bz(""), nil)
|
||||
checkDomain(t, itr, bz(""), nil)
|
||||
checkItem(t, itr, bz(""), bz("value"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("1"), bz("value1"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value2"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("3"), bz("value3"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBIterator4(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.Iterator(bz(""), bz(""))
|
||||
checkDomain(t, itr, bz(""), bz(""))
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator1(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(nil, nil)
|
||||
checkDomain(t, itr, nil, nil)
|
||||
checkItem(t, itr, bz("3"), bz("value3"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value2"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("1"), bz("value1"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz(""), bz("value"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator2(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(bz(""), nil)
|
||||
checkDomain(t, itr, bz(""), nil)
|
||||
checkItem(t, itr, bz("3"), bz("value3"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value2"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("1"), bz("value1"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz(""), bz("value"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator3(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(nil, bz(""))
|
||||
checkDomain(t, itr, nil, bz(""))
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator4(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(bz(""), bz(""))
|
||||
checkDomain(t, itr, bz(""), bz(""))
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator5(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(bz("1"), nil)
|
||||
checkDomain(t, itr, bz("1"), nil)
|
||||
checkItem(t, itr, bz("3"), bz("value3"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value2"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("1"), bz("value1"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator6(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(bz("2"), nil)
|
||||
checkDomain(t, itr, bz("2"), nil)
|
||||
checkItem(t, itr, bz("3"), bz("value3"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value2"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
||||
|
||||
func TestPrefixDBReverseIterator7(t *testing.T) {
|
||||
db := mockDBWithStuff()
|
||||
pdb := NewPrefixDB(db, bz("key"))
|
||||
|
||||
itr := pdb.ReverseIterator(nil, bz("2"))
|
||||
checkDomain(t, itr, nil, bz("2"))
|
||||
checkItem(t, itr, bz("1"), bz("value1"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz(""), bz("value"))
|
||||
checkNext(t, itr, false)
|
||||
checkInvalid(t, itr)
|
||||
itr.Close()
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
/*
|
||||
remotedb is a package for connecting to distributed Tendermint db.DB
|
||||
instances. The purpose is to detach difficult deployments such as
|
||||
CLevelDB that requires gcc or perhaps for databases that require
|
||||
custom configurations such as extra disk space. It also eases
|
||||
the burden and cost of deployment of dependencies for databases
|
||||
to be used by Tendermint developers. Most importantly it is built
|
||||
over the high performant gRPC transport.
|
||||
|
||||
remotedb's RemoteDB implements db.DB so can be used normally
|
||||
like other databases. One just has to explicitly connect to the
|
||||
remote database with a client setup such as:
|
||||
|
||||
client, err := remotedb.NewRemoteDB(addr, cert)
|
||||
// Make sure to invoke InitRemote!
|
||||
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
|
||||
log.Fatalf("Failed to initialize the remote db")
|
||||
}
|
||||
|
||||
client.Set(key1, value)
|
||||
gv1 := client.SetSync(k2, v2)
|
||||
|
||||
client.Delete(k1)
|
||||
gv2 := client.Get(k1)
|
||||
|
||||
for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() {
|
||||
ik, iv := itr.Key(), itr.Value()
|
||||
ds, de := itr.Domain()
|
||||
}
|
||||
|
||||
stats := client.Stats()
|
||||
|
||||
if !client.Has(dk1) {
|
||||
client.SetSync(dk1, dv1)
|
||||
}
|
||||
*/
|
||||
package remotedb
|
@ -1,22 +0,0 @@
|
||||
package grpcdb
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
// NewClient creates a gRPC client connected to the bound gRPC server at serverAddr.
|
||||
// Use kind to set the level of security to either Secure or Insecure.
|
||||
func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {
|
||||
creds, err := credentials.NewClientTLSFromFile(serverCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return protodb.NewDBClient(cc), nil
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
/*
|
||||
grpcdb is the distribution of Tendermint's db.DB instances using
|
||||
the gRPC transport to decouple local db.DB usages from applications,
|
||||
to using them over a network in a highly performant manner.
|
||||
|
||||
grpcdb allows users to initialize a database's server like
|
||||
they would locally and invoke the respective methods of db.DB.
|
||||
|
||||
Most users shouldn't use this package, but should instead use
|
||||
remotedb. Only the lower level users and database server deployers
|
||||
should use it, for functionality such as:
|
||||
|
||||
ln, err := net.Listen("tcp", "0.0.0.0:0")
|
||||
srv := grpcdb.NewServer()
|
||||
defer srv.Stop()
|
||||
go func() {
|
||||
if err := srv.Serve(ln); err != nil {
|
||||
t.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
or
|
||||
addr := ":8998"
|
||||
cert := "server.crt"
|
||||
key := "server.key"
|
||||
go func() {
|
||||
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
|
||||
log.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
*/
|
||||
package grpcdb
|
@ -1,52 +0,0 @@
|
||||
package grpcdb_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"log"
|
||||
|
||||
grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
|
||||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
addr := ":8998"
|
||||
cert := "server.crt"
|
||||
key := "server.key"
|
||||
go func() {
|
||||
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
|
||||
log.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
client, err := grpcdb.NewClient(addr, cert)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create grpcDB client: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// 1. Initialize the DB
|
||||
in := &protodb.Init{
|
||||
Type: "leveldb",
|
||||
Name: "grpc-uno-test",
|
||||
Dir: ".",
|
||||
}
|
||||
if _, err := client.Init(ctx, in); err != nil {
|
||||
log.Fatalf("Init error: %v", err)
|
||||
}
|
||||
|
||||
// 2. Now it can be used!
|
||||
query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")}
|
||||
if _, err := client.SetSync(ctx, query1); err != nil {
|
||||
log.Fatalf("SetSync err: %v", err)
|
||||
}
|
||||
|
||||
query2 := &protodb.Entity{Key: []byte("Project")}
|
||||
read, err := client.Get(ctx, query2)
|
||||
if err != nil {
|
||||
log.Fatalf("Get err: %v", err)
|
||||
}
|
||||
if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) {
|
||||
log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w)
|
||||
}
|
||||
}
|
@ -1,200 +0,0 @@
|
||||
package grpcdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/db"
|
||||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
// ListenAndServe is a blocking function that sets up a gRPC based
|
||||
// server at the address supplied, with the gRPC options passed in.
|
||||
// Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe.
|
||||
func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error {
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv, err := NewServer(cert, key, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.Serve(ln)
|
||||
}
|
||||
|
||||
func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) {
|
||||
creds, err := credentials.NewServerTLSFromFile(cert, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, grpc.Creds(creds))
|
||||
srv := grpc.NewServer(opts...)
|
||||
protodb.RegisterDBServer(srv, new(server))
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
type server struct {
|
||||
mu sync.Mutex
|
||||
db db.DB
|
||||
}
|
||||
|
||||
var _ protodb.DBServer = (*server)(nil)
|
||||
|
||||
// Init initializes the server's database. Only one type of database
|
||||
// can be initialized per server.
|
||||
//
|
||||
// Dir is the directory on the file system in which the DB will be stored(if backed by disk) (TODO: remove)
|
||||
//
|
||||
// Name is representative filesystem entry's basepath
|
||||
//
|
||||
// Type can be either one of:
|
||||
// * cleveldb (if built with gcc enabled)
|
||||
// * fsdb
|
||||
// * memdB
|
||||
// * leveldb
|
||||
// See https://godoc.org/github.com/tendermint/tendermint/libs/db#DBBackendType
|
||||
func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir)
|
||||
return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil
|
||||
}
|
||||
|
||||
func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.Delete(in.Key)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
var nothing = new(protodb.Nothing)
|
||||
|
||||
func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.DeleteSync(in.Key)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
|
||||
value := s.db.Get(in.Key)
|
||||
return &protodb.Entity{Value: value}, nil
|
||||
}
|
||||
|
||||
func (s *server) GetStream(ds protodb.DB_GetStreamServer) error {
|
||||
// Receive routine
|
||||
responsesChan := make(chan *protodb.Entity)
|
||||
go func() {
|
||||
defer close(responsesChan)
|
||||
ctx := context.Background()
|
||||
for {
|
||||
in, err := ds.Recv()
|
||||
if err != nil {
|
||||
responsesChan <- &protodb.Entity{Err: err.Error()}
|
||||
return
|
||||
}
|
||||
out, err := s.Get(ctx, in)
|
||||
if err != nil {
|
||||
if out == nil {
|
||||
out = new(protodb.Entity)
|
||||
out.Key = in.Key
|
||||
}
|
||||
out.Err = err.Error()
|
||||
responsesChan <- out
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise continue on
|
||||
responsesChan <- out
|
||||
}
|
||||
}()
|
||||
|
||||
// Send routine, block until we return
|
||||
for out := range responsesChan {
|
||||
if err := ds.Send(out); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
|
||||
exists := s.db.Has(in.Key)
|
||||
return &protodb.Entity{Exists: exists}, nil
|
||||
}
|
||||
|
||||
func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.Set(in.Key, in.Value)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
|
||||
s.db.SetSync(in.Key, in.Value)
|
||||
return nothing, nil
|
||||
}
|
||||
|
||||
func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error {
|
||||
it := s.db.Iterator(query.Start, query.End)
|
||||
defer it.Close()
|
||||
return s.handleIterator(it, dis.Send)
|
||||
}
|
||||
|
||||
func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error {
|
||||
for it.Valid() {
|
||||
start, end := it.Domain()
|
||||
out := &protodb.Iterator{
|
||||
Domain: &protodb.Domain{Start: start, End: end},
|
||||
Valid: it.Valid(),
|
||||
Key: it.Key(),
|
||||
Value: it.Value(),
|
||||
}
|
||||
if err := sendFunc(out); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Finally move the iterator forward
|
||||
it.Next()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error {
|
||||
it := s.db.ReverseIterator(query.Start, query.End)
|
||||
defer it.Close()
|
||||
return s.handleIterator(it, dis.Send)
|
||||
}
|
||||
|
||||
func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) {
|
||||
stats := s.db.Stats()
|
||||
return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil
|
||||
}
|
||||
|
||||
func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
|
||||
return s.batchWrite(c, b, false)
|
||||
}
|
||||
|
||||
func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
|
||||
return s.batchWrite(c, b, true)
|
||||
}
|
||||
|
||||
func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) {
|
||||
bat := s.db.NewBatch()
|
||||
defer bat.Close()
|
||||
for _, op := range b.Ops {
|
||||
switch op.Type {
|
||||
case protodb.Operation_SET:
|
||||
bat.Set(op.Entity.Key, op.Entity.Value)
|
||||
case protodb.Operation_DELETE:
|
||||
bat.Delete(op.Entity.Key)
|
||||
}
|
||||
}
|
||||
if sync {
|
||||
bat.WriteSync()
|
||||
} else {
|
||||
bat.Write()
|
||||
}
|
||||
return nothing, nil
|
||||
}
|
@ -1,914 +0,0 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: defs.proto
|
||||
|
||||
/*
|
||||
Package protodb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
defs.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Batch
|
||||
Operation
|
||||
Entity
|
||||
Nothing
|
||||
Domain
|
||||
Iterator
|
||||
Stats
|
||||
Init
|
||||
*/
|
||||
package protodb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type Operation_Type int32
|
||||
|
||||
const (
|
||||
Operation_SET Operation_Type = 0
|
||||
Operation_DELETE Operation_Type = 1
|
||||
)
|
||||
|
||||
var Operation_Type_name = map[int32]string{
|
||||
0: "SET",
|
||||
1: "DELETE",
|
||||
}
|
||||
var Operation_Type_value = map[string]int32{
|
||||
"SET": 0,
|
||||
"DELETE": 1,
|
||||
}
|
||||
|
||||
func (x Operation_Type) String() string {
|
||||
return proto.EnumName(Operation_Type_name, int32(x))
|
||||
}
|
||||
func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
|
||||
|
||||
type Batch struct {
|
||||
Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Batch) Reset() { *m = Batch{} }
|
||||
func (m *Batch) String() string { return proto.CompactTextString(m) }
|
||||
func (*Batch) ProtoMessage() {}
|
||||
func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Batch) GetOps() []*Operation {
|
||||
if m != nil {
|
||||
return m.Ops
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Operation struct {
|
||||
Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"`
|
||||
Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Operation) Reset() { *m = Operation{} }
|
||||
func (m *Operation) String() string { return proto.CompactTextString(m) }
|
||||
func (*Operation) ProtoMessage() {}
|
||||
func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *Operation) GetEntity() *Entity {
|
||||
if m != nil {
|
||||
return m.Entity
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Operation) GetType() Operation_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return Operation_SET
|
||||
}
|
||||
|
||||
type Entity struct {
|
||||
Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
|
||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
|
||||
Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"`
|
||||
Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"`
|
||||
End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"`
|
||||
Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"`
|
||||
CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Entity) Reset() { *m = Entity{} }
|
||||
func (m *Entity) String() string { return proto.CompactTextString(m) }
|
||||
func (*Entity) ProtoMessage() {}
|
||||
func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *Entity) GetId() int32 {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Entity) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetExists() bool {
|
||||
if m != nil {
|
||||
return m.Exists
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Entity) GetStart() []byte {
|
||||
if m != nil {
|
||||
return m.Start
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetEnd() []byte {
|
||||
if m != nil {
|
||||
return m.End
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Entity) GetErr() string {
|
||||
if m != nil {
|
||||
return m.Err
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Entity) GetCreatedAt() int64 {
|
||||
if m != nil {
|
||||
return m.CreatedAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Nothing struct {
|
||||
}
|
||||
|
||||
func (m *Nothing) Reset() { *m = Nothing{} }
|
||||
func (m *Nothing) String() string { return proto.CompactTextString(m) }
|
||||
func (*Nothing) ProtoMessage() {}
|
||||
func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
type Domain struct {
|
||||
Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"`
|
||||
End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Domain) Reset() { *m = Domain{} }
|
||||
func (m *Domain) String() string { return proto.CompactTextString(m) }
|
||||
func (*Domain) ProtoMessage() {}
|
||||
func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *Domain) GetStart() []byte {
|
||||
if m != nil {
|
||||
return m.Start
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Domain) GetEnd() []byte {
|
||||
if m != nil {
|
||||
return m.End
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Iterator struct {
|
||||
Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"`
|
||||
Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"`
|
||||
Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Iterator) Reset() { *m = Iterator{} }
|
||||
func (m *Iterator) String() string { return proto.CompactTextString(m) }
|
||||
func (*Iterator) ProtoMessage() {}
|
||||
func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
func (m *Iterator) GetDomain() *Domain {
|
||||
if m != nil {
|
||||
return m.Domain
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Iterator) GetValid() bool {
|
||||
if m != nil {
|
||||
return m.Valid
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Iterator) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Iterator) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Stats struct {
|
||||
Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Stats) Reset() { *m = Stats{} }
|
||||
func (m *Stats) String() string { return proto.CompactTextString(m) }
|
||||
func (*Stats) ProtoMessage() {}
|
||||
func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
func (m *Stats) GetData() map[string]string {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Stats) GetTimeAt() int64 {
|
||||
if m != nil {
|
||||
return m.TimeAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Init struct {
|
||||
Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"`
|
||||
Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Init) Reset() { *m = Init{} }
|
||||
func (m *Init) String() string { return proto.CompactTextString(m) }
|
||||
func (*Init) ProtoMessage() {}
|
||||
func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
func (m *Init) GetType() string {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Init) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Init) GetDir() string {
|
||||
if m != nil {
|
||||
return m.Dir
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Batch)(nil), "protodb.Batch")
|
||||
proto.RegisterType((*Operation)(nil), "protodb.Operation")
|
||||
proto.RegisterType((*Entity)(nil), "protodb.Entity")
|
||||
proto.RegisterType((*Nothing)(nil), "protodb.Nothing")
|
||||
proto.RegisterType((*Domain)(nil), "protodb.Domain")
|
||||
proto.RegisterType((*Iterator)(nil), "protodb.Iterator")
|
||||
proto.RegisterType((*Stats)(nil), "protodb.Stats")
|
||||
proto.RegisterType((*Init)(nil), "protodb.Init")
|
||||
proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value)
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for DB service
|
||||
|
||||
type DBClient interface {
|
||||
Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error)
|
||||
Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error)
|
||||
GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error)
|
||||
Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error)
|
||||
Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
|
||||
Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error)
|
||||
ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error)
|
||||
// rpc print(Nothing) returns (Entity) {}
|
||||
Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error)
|
||||
BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error)
|
||||
BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error)
|
||||
}
|
||||
|
||||
type dBClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewDBClient(cc *grpc.ClientConn) DBClient {
|
||||
return &dBClient{cc}
|
||||
}
|
||||
|
||||
func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) {
|
||||
out := new(Entity)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) {
|
||||
out := new(Entity)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &dBGetStreamClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DB_GetStreamClient interface {
|
||||
Send(*Entity) error
|
||||
Recv() (*Entity, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type dBGetStreamClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *dBGetStreamClient) Send(m *Entity) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *dBGetStreamClient) Recv() (*Entity, error) {
|
||||
m := new(Entity)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) {
|
||||
out := new(Entity)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &dBIteratorClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DB_IteratorClient interface {
|
||||
Recv() (*Iterator, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type dBIteratorClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *dBIteratorClient) Recv() (*Iterator, error) {
|
||||
m := new(Iterator)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &dBReverseIteratorClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type DB_ReverseIteratorClient interface {
|
||||
Recv() (*Iterator, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type dBReverseIteratorClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *dBReverseIteratorClient) Recv() (*Iterator, error) {
|
||||
m := new(Iterator)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) {
|
||||
out := new(Stats)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) {
|
||||
out := new(Nothing)
|
||||
err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for DB service
|
||||
|
||||
type DBServer interface {
|
||||
Init(context.Context, *Init) (*Entity, error)
|
||||
Get(context.Context, *Entity) (*Entity, error)
|
||||
GetStream(DB_GetStreamServer) error
|
||||
Has(context.Context, *Entity) (*Entity, error)
|
||||
Set(context.Context, *Entity) (*Nothing, error)
|
||||
SetSync(context.Context, *Entity) (*Nothing, error)
|
||||
Delete(context.Context, *Entity) (*Nothing, error)
|
||||
DeleteSync(context.Context, *Entity) (*Nothing, error)
|
||||
Iterator(*Entity, DB_IteratorServer) error
|
||||
ReverseIterator(*Entity, DB_ReverseIteratorServer) error
|
||||
// rpc print(Nothing) returns (Entity) {}
|
||||
Stats(context.Context, *Nothing) (*Stats, error)
|
||||
BatchWrite(context.Context, *Batch) (*Nothing, error)
|
||||
BatchWriteSync(context.Context, *Batch) (*Nothing, error)
|
||||
}
|
||||
|
||||
func RegisterDBServer(s *grpc.Server, srv DBServer) {
|
||||
s.RegisterService(&_DB_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Init)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Init(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Init",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Init(ctx, req.(*Init))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Get(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Get",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Get(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(DBServer).GetStream(&dBGetStreamServer{stream})
|
||||
}
|
||||
|
||||
type DB_GetStreamServer interface {
|
||||
Send(*Entity) error
|
||||
Recv() (*Entity, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type dBGetStreamServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *dBGetStreamServer) Send(m *Entity) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *dBGetStreamServer) Recv() (*Entity, error) {
|
||||
m := new(Entity)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Has(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Has",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Has(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Set(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Set",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Set(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).SetSync(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/SetSync",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).SetSync(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Delete(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Delete",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Delete(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Entity)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).DeleteSync(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/DeleteSync",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).DeleteSync(ctx, req.(*Entity))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(Entity)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(DBServer).Iterator(m, &dBIteratorServer{stream})
|
||||
}
|
||||
|
||||
type DB_IteratorServer interface {
|
||||
Send(*Iterator) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type dBIteratorServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *dBIteratorServer) Send(m *Iterator) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(Entity)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream})
|
||||
}
|
||||
|
||||
type DB_ReverseIteratorServer interface {
|
||||
Send(*Iterator) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type dBReverseIteratorServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *dBReverseIteratorServer) Send(m *Iterator) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Nothing)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).Stats(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/Stats",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).Stats(ctx, req.(*Nothing))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Batch)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).BatchWrite(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/BatchWrite",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).BatchWrite(ctx, req.(*Batch))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(Batch)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(DBServer).BatchWriteSync(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/protodb.DB/BatchWriteSync",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _DB_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "protodb.DB",
|
||||
HandlerType: (*DBServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "init",
|
||||
Handler: _DB_Init_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "get",
|
||||
Handler: _DB_Get_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "has",
|
||||
Handler: _DB_Has_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "set",
|
||||
Handler: _DB_Set_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "setSync",
|
||||
Handler: _DB_SetSync_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "delete",
|
||||
Handler: _DB_Delete_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "deleteSync",
|
||||
Handler: _DB_DeleteSync_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "stats",
|
||||
Handler: _DB_Stats_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "batchWrite",
|
||||
Handler: _DB_BatchWrite_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "batchWriteSync",
|
||||
Handler: _DB_BatchWriteSync_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "getStream",
|
||||
Handler: _DB_GetStream_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "iterator",
|
||||
Handler: _DB_Iterator_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "reverseIterator",
|
||||
Handler: _DB_ReverseIterator_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "defs.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("defs.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 606 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
|
||||
0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b,
|
||||
0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95,
|
||||
0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8,
|
||||
0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 0xde, 0x7b, 0xb3, 0xf3,
|
||||
0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2,
|
||||
0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef,
|
||||
0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01,
|
||||
0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b,
|
||||
0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b,
|
||||
0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6,
|
||||
0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44,
|
||||
0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17,
|
||||
0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3,
|
||||
0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7,
|
||||
0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43,
|
||||
0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08,
|
||||
0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e,
|
||||
0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a,
|
||||
0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b,
|
||||
0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b,
|
||||
0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58,
|
||||
0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92,
|
||||
0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a,
|
||||
0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e,
|
||||
0xe1, 0xd8, 0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72,
|
||||
0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a,
|
||||
0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0,
|
||||
0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde,
|
||||
0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03,
|
||||
0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72,
|
||||
0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9,
|
||||
0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c,
|
||||
0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 0xd8, 0xd5, 0x74,
|
||||
0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7,
|
||||
0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb,
|
||||
0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75,
|
||||
0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00,
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package protodb;
|
||||
|
||||
message Batch {
|
||||
repeated Operation ops = 1;
|
||||
}
|
||||
|
||||
message Operation {
|
||||
Entity entity = 1;
|
||||
enum Type {
|
||||
SET = 0;
|
||||
DELETE = 1;
|
||||
}
|
||||
Type type = 2;
|
||||
}
|
||||
|
||||
message Entity {
|
||||
int32 id = 1;
|
||||
bytes key = 2;
|
||||
bytes value = 3;
|
||||
bool exists = 4;
|
||||
bytes start = 5;
|
||||
bytes end = 6;
|
||||
string err = 7;
|
||||
int64 created_at = 8;
|
||||
}
|
||||
|
||||
message Nothing {
|
||||
}
|
||||
|
||||
message Domain {
|
||||
bytes start = 1;
|
||||
bytes end = 2;
|
||||
}
|
||||
|
||||
message Iterator {
|
||||
Domain domain = 1;
|
||||
bool valid = 2;
|
||||
bytes key = 3;
|
||||
bytes value = 4;
|
||||
}
|
||||
|
||||
message Stats {
|
||||
map<string, string> data = 1;
|
||||
int64 time_at = 2;
|
||||
}
|
||||
|
||||
message Init {
|
||||
string Type = 1;
|
||||
string Name = 2;
|
||||
string Dir = 3;
|
||||
}
|
||||
|
||||
service DB {
|
||||
rpc init(Init) returns (Entity) {}
|
||||
rpc get(Entity) returns (Entity) {}
|
||||
rpc getStream(stream Entity) returns (stream Entity) {}
|
||||
|
||||
rpc has(Entity) returns (Entity) {}
|
||||
rpc set(Entity) returns (Nothing) {}
|
||||
rpc setSync(Entity) returns (Nothing) {}
|
||||
rpc delete(Entity) returns (Nothing) {}
|
||||
rpc deleteSync(Entity) returns (Nothing) {}
|
||||
rpc iterator(Entity) returns (stream Iterator) {}
|
||||
rpc reverseIterator(Entity) returns (stream Iterator) {}
|
||||
// rpc print(Nothing) returns (Entity) {}
|
||||
rpc stats(Nothing) returns (Stats) {}
|
||||
rpc batchWrite(Batch) returns (Nothing) {}
|
||||
rpc batchWriteSync(Batch) returns (Nothing) {}
|
||||
}
|
@ -1,266 +0,0 @@
|
||||
package remotedb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
|
||||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
|
||||
)
|
||||
|
||||
type RemoteDB struct {
|
||||
ctx context.Context
|
||||
dc protodb.DBClient
|
||||
}
|
||||
|
||||
func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) {
|
||||
return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey))
|
||||
}
|
||||
|
||||
func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &RemoteDB{dc: gdc, ctx: context.Background()}, nil
|
||||
}
|
||||
|
||||
type Init struct {
|
||||
Dir string
|
||||
Name string
|
||||
Type string
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) InitRemote(in *Init) error {
|
||||
_, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name})
|
||||
return err
|
||||
}
|
||||
|
||||
var _ db.DB = (*RemoteDB)(nil)
|
||||
|
||||
// Close is a noop currently
|
||||
func (rd *RemoteDB) Close() {
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) Delete(key []byte) {
|
||||
if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Delete: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) DeleteSync(key []byte) {
|
||||
if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) Set(key, value []byte) {
|
||||
if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Set: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) SetSync(key, value []byte) {
|
||||
if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.SetSync: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) Get(key []byte) []byte {
|
||||
res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Get error: %v", err))
|
||||
}
|
||||
return res.Value
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) Has(key []byte) bool {
|
||||
res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Has error: %v", err))
|
||||
}
|
||||
return res.Exists
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator {
|
||||
dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
|
||||
}
|
||||
return makeReverseIterator(dic)
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) NewBatch() db.Batch {
|
||||
return &batch{
|
||||
db: rd,
|
||||
ops: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Implement Print when db.DB implements a method
|
||||
// to print to a string and not db.Print to stdout.
|
||||
func (rd *RemoteDB) Print() {
|
||||
panic("Unimplemented")
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) Stats() map[string]string {
|
||||
stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Stats error: %v", err))
|
||||
}
|
||||
if stats == nil {
|
||||
return nil
|
||||
}
|
||||
return stats.Data
|
||||
}
|
||||
|
||||
func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator {
|
||||
dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
|
||||
}
|
||||
return makeIterator(dic)
|
||||
}
|
||||
|
||||
func makeIterator(dic protodb.DB_IteratorClient) db.Iterator {
|
||||
return &iterator{dic: dic}
|
||||
}
|
||||
|
||||
func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator {
|
||||
return &reverseIterator{dric: dric}
|
||||
}
|
||||
|
||||
type reverseIterator struct {
|
||||
dric protodb.DB_ReverseIteratorClient
|
||||
cur *protodb.Iterator
|
||||
}
|
||||
|
||||
var _ db.Iterator = (*iterator)(nil)
|
||||
|
||||
func (rItr *reverseIterator) Valid() bool {
|
||||
return rItr.cur != nil && rItr.cur.Valid
|
||||
}
|
||||
|
||||
func (rItr *reverseIterator) Domain() (start, end []byte) {
|
||||
if rItr.cur == nil || rItr.cur.Domain == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return rItr.cur.Domain.Start, rItr.cur.Domain.End
|
||||
}
|
||||
|
||||
// Next advances the current reverseIterator
|
||||
func (rItr *reverseIterator) Next() {
|
||||
var err error
|
||||
rItr.cur, err = rItr.dric.Recv()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (rItr *reverseIterator) Key() []byte {
|
||||
if rItr.cur == nil {
|
||||
return nil
|
||||
}
|
||||
return rItr.cur.Key
|
||||
}
|
||||
|
||||
func (rItr *reverseIterator) Value() []byte {
|
||||
if rItr.cur == nil {
|
||||
return nil
|
||||
}
|
||||
return rItr.cur.Value
|
||||
}
|
||||
|
||||
func (rItr *reverseIterator) Close() {
|
||||
}
|
||||
|
||||
// iterator implements the db.Iterator by retrieving
|
||||
// streamed iterators from the remote backend as
|
||||
// needed. It is NOT safe for concurrent usage,
|
||||
// matching the behavior of other iterators.
|
||||
type iterator struct {
|
||||
dic protodb.DB_IteratorClient
|
||||
cur *protodb.Iterator
|
||||
}
|
||||
|
||||
var _ db.Iterator = (*iterator)(nil)
|
||||
|
||||
func (itr *iterator) Valid() bool {
|
||||
return itr.cur != nil && itr.cur.Valid
|
||||
}
|
||||
|
||||
func (itr *iterator) Domain() (start, end []byte) {
|
||||
if itr.cur == nil || itr.cur.Domain == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return itr.cur.Domain.Start, itr.cur.Domain.End
|
||||
}
|
||||
|
||||
// Next advances the current iterator
|
||||
func (itr *iterator) Next() {
|
||||
var err error
|
||||
itr.cur, err = itr.dic.Recv()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *iterator) Key() []byte {
|
||||
if itr.cur == nil {
|
||||
return nil
|
||||
}
|
||||
return itr.cur.Key
|
||||
}
|
||||
|
||||
func (itr *iterator) Value() []byte {
|
||||
if itr.cur == nil {
|
||||
return nil
|
||||
}
|
||||
return itr.cur.Value
|
||||
}
|
||||
|
||||
func (itr *iterator) Close() {
|
||||
err := itr.dic.CloseSend()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error closing iterator: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
type batch struct {
|
||||
db *RemoteDB
|
||||
ops []*protodb.Operation
|
||||
}
|
||||
|
||||
var _ db.Batch = (*batch)(nil)
|
||||
|
||||
func (bat *batch) Set(key, value []byte) {
|
||||
op := &protodb.Operation{
|
||||
Entity: &protodb.Entity{Key: key, Value: value},
|
||||
Type: protodb.Operation_SET,
|
||||
}
|
||||
bat.ops = append(bat.ops, op)
|
||||
}
|
||||
|
||||
func (bat *batch) Delete(key []byte) {
|
||||
op := &protodb.Operation{
|
||||
Entity: &protodb.Entity{Key: key},
|
||||
Type: protodb.Operation_DELETE,
|
||||
}
|
||||
bat.ops = append(bat.ops, op)
|
||||
}
|
||||
|
||||
func (bat *batch) Write() {
|
||||
if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (bat *batch) WriteSync() {
|
||||
if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil {
|
||||
panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (bat *batch) Close() {
|
||||
bat.ops = nil
|
||||
}
|
@ -1,123 +0,0 @@
|
||||
package remotedb_test
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/db/remotedb"
|
||||
"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
|
||||
)
|
||||
|
||||
func TestRemoteDB(t *testing.T) {
|
||||
cert := "test.crt"
|
||||
key := "test.key"
|
||||
ln, err := net.Listen("tcp", "localhost:0")
|
||||
require.Nil(t, err, "expecting a port to have been assigned on which we can listen")
|
||||
srv, err := grpcdb.NewServer(cert, key)
|
||||
require.Nil(t, err)
|
||||
defer srv.Stop()
|
||||
go func() {
|
||||
if err := srv.Serve(ln); err != nil {
|
||||
t.Fatalf("BindServer: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
|
||||
require.Nil(t, err, "expecting a successful client creation")
|
||||
dbName := "test-remote-db"
|
||||
require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"}))
|
||||
defer func() {
|
||||
err := os.RemoveAll(dbName + ".db")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
k1 := []byte("key-1")
|
||||
v1 := client.Get(k1)
|
||||
require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1)
|
||||
vv1 := []byte("value-1")
|
||||
client.Set(k1, vv1)
|
||||
gv1 := client.Get(k1)
|
||||
require.Equal(t, gv1, vv1)
|
||||
|
||||
// Simple iteration
|
||||
itr := client.Iterator(nil, nil)
|
||||
itr.Next()
|
||||
require.Equal(t, itr.Key(), []byte("key-1"))
|
||||
require.Equal(t, itr.Value(), []byte("value-1"))
|
||||
require.Panics(t, itr.Next)
|
||||
itr.Close()
|
||||
|
||||
// Set some more keys
|
||||
k2 := []byte("key-2")
|
||||
v2 := []byte("value-2")
|
||||
client.SetSync(k2, v2)
|
||||
has := client.Has(k2)
|
||||
require.True(t, has)
|
||||
gv2 := client.Get(k2)
|
||||
require.Equal(t, gv2, v2)
|
||||
|
||||
// More iteration
|
||||
itr = client.Iterator(nil, nil)
|
||||
itr.Next()
|
||||
require.Equal(t, itr.Key(), []byte("key-1"))
|
||||
require.Equal(t, itr.Value(), []byte("value-1"))
|
||||
itr.Next()
|
||||
require.Equal(t, itr.Key(), []byte("key-2"))
|
||||
require.Equal(t, itr.Value(), []byte("value-2"))
|
||||
require.Panics(t, itr.Next)
|
||||
itr.Close()
|
||||
|
||||
// Deletion
|
||||
client.Delete(k1)
|
||||
client.DeleteSync(k2)
|
||||
gv1 = client.Get(k1)
|
||||
gv2 = client.Get(k2)
|
||||
require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore")
|
||||
require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore")
|
||||
|
||||
// Batch tests - set
|
||||
k3 := []byte("key-3")
|
||||
k4 := []byte("key-4")
|
||||
k5 := []byte("key-5")
|
||||
v3 := []byte("value-3")
|
||||
v4 := []byte("value-4")
|
||||
v5 := []byte("value-5")
|
||||
bat := client.NewBatch()
|
||||
bat.Set(k3, v3)
|
||||
bat.Set(k4, v4)
|
||||
rv3 := client.Get(k3)
|
||||
require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored")
|
||||
rv4 := client.Get(k4)
|
||||
require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored")
|
||||
bat.Write()
|
||||
rv3 = client.Get(k3)
|
||||
require.Equal(t, rv3, v3, "expecting k3 to have been stored")
|
||||
rv4 = client.Get(k4)
|
||||
require.Equal(t, rv4, v4, "expecting k4 to have been stored")
|
||||
|
||||
// Batch tests - deletion
|
||||
bat = client.NewBatch()
|
||||
bat.Delete(k4)
|
||||
bat.Delete(k3)
|
||||
bat.WriteSync()
|
||||
rv3 = client.Get(k3)
|
||||
require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted")
|
||||
rv4 = client.Get(k4)
|
||||
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")
|
||||
|
||||
// Batch tests - set and delete
|
||||
bat = client.NewBatch()
|
||||
bat.Set(k4, v4)
|
||||
bat.Set(k5, v5)
|
||||
bat.Delete(k4)
|
||||
bat.WriteSync()
|
||||
rv4 = client.Get(k4)
|
||||
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")
|
||||
rv5 := client.Get(k5)
|
||||
require.Equal(t, rv5, v5, "expecting k5 to have been stored")
|
||||
}
|
@ -1,25 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEOjCCAiKgAwIBAgIQYO+jRR0Sbs+WzU/hj2aoxzANBgkqhkiG9w0BAQsFADAZ
|
||||
MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy
|
||||
MDIxMTAyMDRaMBMxETAPBgNVBAMTCHJlbW90ZWRiMIIBIjANBgkqhkiG9w0BAQEF
|
||||
AAOCAQ8AMIIBCgKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydp
|
||||
qYYHYei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7
|
||||
iZjzAJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+
|
||||
hCbuwAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7x
|
||||
tW3/Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHd
|
||||
A5I4+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABo4GDMIGAMA4GA1Ud
|
||||
DwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O
|
||||
BBYEFOA8wzCYhoZmy0WHgnv/0efijUMKMB8GA1UdIwQYMBaAFNSTPe743aIx7rIp
|
||||
vn5HV3gJ4z1hMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAKZf
|
||||
EVo0i9nMZv6ZJjbmAlMfo5FH41/oBYC8pyGAnJKl42raXKJAbl45h80iGn3vNggf
|
||||
7HJjN+znAHDFYjIwK2IV2WhHPyxK6uk+FA5uBR/aAPcw+zhRfXUMYdhNHr6KBlZZ
|
||||
bvD7Iq4UALg+XFQz/fQkIi7QvTBwkYyPNA2+a/TGf6myMp26hoz73DQXklqm6Zle
|
||||
myPs1Vp9bTgOv/3l64BMUV37FZ2TyiisBkV1qPEoDxT7Fbi8G1K8gMDLd0wu0jvX
|
||||
nz96nk30TDnZewV1fhkMJVKKGiLbaIgHcu1lWsWJZ0tdc+MF7R9bLBO5T0cTDgNy
|
||||
V8/51g+Cxu5SSHKjFkT0vBBONhjPmRqzJpxOQfHjiv8mmHwwiaNNy2VkJHj5GHer
|
||||
64r67fQTSqAifzgwAbXYK+ObUbx4PnHvSYSF5dbcR1Oj6UTVtGAgdmN2Y03AIc1B
|
||||
CiaojcMVuMRz/SvmPWl34GBvvT5/h9VCpHEB3vV6bQxJb5U1fLyo4GABA2Ic3DHr
|
||||
kV5p7CZI06UNbyQyFtnEb5XoXywRa4Df7FzDIv3HL13MtyXrYrJqC1eAbn+3jGdh
|
||||
bQa510mWYAlQQmzHSf/SLKott4QKR3SmhOGqGKNvquAYJ9XLdYdsPmKKGH6iGUD8
|
||||
n7yEi0KMD/BHsPQNNLatsR2SxqGDeLhbLR0w2hig
|
||||
-----END CERTIFICATE-----
|
@ -1,27 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydpqYYH
|
||||
Yei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7iZjz
|
||||
AJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+hCbu
|
||||
wAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7xtW3/
|
||||
Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHdA5I4
|
||||
+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABAoIBAQCEVFAZ3puc7aIU
|
||||
NuIXqwmMz+KMFuMr+SL6aYr6LhB2bhpfQSr6LLEu1L6wMm1LnCbLneJVtW+1/6U+
|
||||
SyNFRmzrmmLNmZx7c0AvZb14DQ4fJ8uOjryje0vptUHT1YJJ4n5R1L7yJjCElsC8
|
||||
cDBPfO+sOzlaGmBmuxU7NkNp0k/WJc1Wnn5WFCKKk8BCH1AUKvn/vwbRV4zl/Be7
|
||||
ApywPUouV+GJlTAG5KLb15CWKSqFNJxUJ6K7NnmfDoy7muUUv8MtrTn59XTH4qK7
|
||||
p/3A8tdNpR/RpEJ8+y3kS9CDZBVnsk0j0ptT//jdt1vSsylXxrf7vjLnyguRZZ5H
|
||||
Vwe2POotAoGBAOY1UaFjtIz2G5qromaUtrPb5EPWRU8fiLtUXUDKG8KqNAqsGbDz
|
||||
Stw1mVFyyuaFMReO18djCvcja1xxF3TZbdpV1k7RfcpEZXiFzBAPgeEGdA3Tc3V2
|
||||
byuJQthWamCBxF/7OGUmH/E/kH0pv5g9+eIitK/CUC2YUhCnubhchGAXAoGBAMxL
|
||||
O7mnPqDJ2PqxVip/lL6VnchtF1bx1aDNr83rVTf+BEsOgCIFoDEBIVKDnhXlaJu7
|
||||
8JN4la/esytq4j3nM1cl6mjvw2ixYmwQtKiDuNiyb88hhQ+nxVsbIpYxtbhsj+u5
|
||||
hOrMN6jKd0GVWsYpdNvY/dXZG1MXhbWwExjRAY+vAoGBAKBu3jHUU5q9VWWIYciN
|
||||
sXpNL5qbNHg86MRsugSSFaCnj1c0sz7ffvdSn0Pk9USL5Defw/9fpd+wHn0xD4DO
|
||||
msFDevQ5CSoyWmkRDbLPq9sP7UdJariczkMQCLbOGpqhNSMS6C2N0UsG2oJv2ueV
|
||||
oZUYTMYEbG4qLl8PFN5IE7UHAoGAGwEq4OyZm7lyxBii8jUxHUw7sh2xgx2uhnYJ
|
||||
8idUeXVLbfx5tYWW2kNy+yxIvk432LYsI+JBryC6AFg9lb81CyUI6lwfMXyZLP28
|
||||
U7Ytvf9ARloA88PSk6tvk/j4M2uuTpOUXVEnXll9EB9FA4LBXro9O4JaWU53rz+a
|
||||
FqKyGSMCgYEAuYCGC+Fz7lIa0aE4tT9mwczQequxGYsL41KR/4pDO3t9QsnzunLY
|
||||
fvCFhteBOstwTBBdfBaKIwSp3zI2QtA4K0Jx9SAJ9q0ft2ciB9ukUFBhC9+TqzXg
|
||||
gSz3XpRtI8PhwAxZgCnov+NPQV8IxvD4ZgnnEiRBHrYnSEsaMLoVnkw=
|
||||
-----END RSA PRIVATE KEY-----
|
136
libs/db/types.go
136
libs/db/types.go
@ -1,136 +0,0 @@
|
||||
package db
|
||||
|
||||
// DBs are goroutine safe.
|
||||
type DB interface {
|
||||
|
||||
// Get returns nil iff key doesn't exist.
|
||||
// A nil key is interpreted as an empty byteslice.
|
||||
// CONTRACT: key, value readonly []byte
|
||||
Get([]byte) []byte
|
||||
|
||||
// Has checks if a key exists.
|
||||
// A nil key is interpreted as an empty byteslice.
|
||||
// CONTRACT: key, value readonly []byte
|
||||
Has(key []byte) bool
|
||||
|
||||
// Set sets the key.
|
||||
// A nil key is interpreted as an empty byteslice.
|
||||
// CONTRACT: key, value readonly []byte
|
||||
Set([]byte, []byte)
|
||||
SetSync([]byte, []byte)
|
||||
|
||||
// Delete deletes the key.
|
||||
// A nil key is interpreted as an empty byteslice.
|
||||
// CONTRACT: key readonly []byte
|
||||
Delete([]byte)
|
||||
DeleteSync([]byte)
|
||||
|
||||
// Iterate over a domain of keys in ascending order. End is exclusive.
|
||||
// Start must be less than end, or the Iterator is invalid.
|
||||
// A nil start is interpreted as an empty byteslice.
|
||||
// If end is nil, iterates up to the last item (inclusive).
|
||||
// CONTRACT: No writes may happen within a domain while an iterator exists over it.
|
||||
// CONTRACT: start, end readonly []byte
|
||||
Iterator(start, end []byte) Iterator
|
||||
|
||||
// Iterate over a domain of keys in descending order. End is exclusive.
|
||||
// Start must be less than end, or the Iterator is invalid.
|
||||
// If start is nil, iterates up to the first/least item (inclusive).
|
||||
// If end is nil, iterates from the last/greatest item (inclusive).
|
||||
// CONTRACT: No writes may happen within a domain while an iterator exists over it.
|
||||
// CONTRACT: start, end readonly []byte
|
||||
ReverseIterator(start, end []byte) Iterator
|
||||
|
||||
// Closes the connection.
|
||||
Close()
|
||||
|
||||
// Creates a batch for atomic updates.
|
||||
NewBatch() Batch
|
||||
|
||||
// For debugging
|
||||
Print()
|
||||
|
||||
// Stats returns a map of property values for all keys and the size of the cache.
|
||||
Stats() map[string]string
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Batch
|
||||
|
||||
// Batch Close must be called when the program no longer needs the object.
|
||||
type Batch interface {
|
||||
SetDeleter
|
||||
Write()
|
||||
WriteSync()
|
||||
Close()
|
||||
}
|
||||
|
||||
type SetDeleter interface {
|
||||
Set(key, value []byte) // CONTRACT: key, value readonly []byte
|
||||
Delete(key []byte) // CONTRACT: key readonly []byte
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Iterator
|
||||
|
||||
/*
|
||||
Usage:
|
||||
|
||||
var itr Iterator = ...
|
||||
defer itr.Close()
|
||||
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
k, v := itr.Key(); itr.Value()
|
||||
// ...
|
||||
}
|
||||
*/
|
||||
type Iterator interface {
|
||||
|
||||
// The start & end (exclusive) limits to iterate over.
|
||||
// If end < start, then the Iterator goes in reverse order.
|
||||
//
|
||||
// A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate
|
||||
// over anything with the prefix []byte{12, 13}.
|
||||
//
|
||||
// The smallest key is the empty byte array []byte{} - see BeginningKey().
|
||||
// The largest key is the nil byte array []byte(nil) - see EndingKey().
|
||||
// CONTRACT: start, end readonly []byte
|
||||
Domain() (start []byte, end []byte)
|
||||
|
||||
// Valid returns whether the current position is valid.
|
||||
// Once invalid, an Iterator is forever invalid.
|
||||
Valid() bool
|
||||
|
||||
// Next moves the iterator to the next sequential key in the database, as
|
||||
// defined by order of iteration.
|
||||
//
|
||||
// If Valid returns false, this method will panic.
|
||||
Next()
|
||||
|
||||
// Key returns the key of the cursor.
|
||||
// If Valid returns false, this method will panic.
|
||||
// CONTRACT: key readonly []byte
|
||||
Key() (key []byte)
|
||||
|
||||
// Value returns the value of the cursor.
|
||||
// If Valid returns false, this method will panic.
|
||||
// CONTRACT: value readonly []byte
|
||||
Value() (value []byte)
|
||||
|
||||
// Close releases the Iterator.
|
||||
Close()
|
||||
}
|
||||
|
||||
// For testing convenience.
|
||||
func bz(s string) []byte {
|
||||
return []byte(s)
|
||||
}
|
||||
|
||||
// We defensively turn nil keys or values into []byte{} for
|
||||
// most operations.
|
||||
func nonNilBytes(bz []byte) []byte {
|
||||
if bz == nil {
|
||||
return []byte{}
|
||||
}
|
||||
return bz
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
func cp(bz []byte) (ret []byte) {
|
||||
ret = make([]byte, len(bz))
|
||||
copy(ret, bz)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Returns a slice of the same length (big endian)
|
||||
// except incremented by one.
|
||||
// Returns nil on overflow (e.g. if bz bytes are all 0xFF)
|
||||
// CONTRACT: len(bz) > 0
|
||||
func cpIncr(bz []byte) (ret []byte) {
|
||||
if len(bz) == 0 {
|
||||
panic("cpIncr expects non-zero bz length")
|
||||
}
|
||||
ret = cp(bz)
|
||||
for i := len(bz) - 1; i >= 0; i-- {
|
||||
if ret[i] < byte(0xFF) {
|
||||
ret[i]++
|
||||
return
|
||||
}
|
||||
ret[i] = byte(0x00)
|
||||
if i == 0 {
|
||||
// Overflow
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// See DB interface documentation for more information.
|
||||
func IsKeyInDomain(key, start, end []byte) bool {
|
||||
if bytes.Compare(key, start) < 0 {
|
||||
return false
|
||||
}
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
@ -1,104 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Empty iterator for empty db.
|
||||
func TestPrefixIteratorNoMatchNil(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
itr := IteratePrefix(db, []byte("2"))
|
||||
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Empty iterator for db populated after iterator created.
|
||||
func TestPrefixIteratorNoMatch1(t *testing.T) {
|
||||
for backend := range backends {
|
||||
if backend == BoltDBBackend {
|
||||
t.Log("bolt does not support concurrent writes while iterating")
|
||||
continue
|
||||
}
|
||||
|
||||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
itr := IteratePrefix(db, []byte("2"))
|
||||
db.SetSync(bz("1"), bz("value_1"))
|
||||
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Empty iterator for prefix starting after db entry.
|
||||
func TestPrefixIteratorNoMatch2(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
db.SetSync(bz("3"), bz("value_3"))
|
||||
itr := IteratePrefix(db, []byte("4"))
|
||||
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Iterator with single val for db with single val, starting from that val.
|
||||
func TestPrefixIteratorMatch1(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
db.SetSync(bz("2"), bz("value_2"))
|
||||
itr := IteratePrefix(db, bz("2"))
|
||||
|
||||
checkValid(t, itr, true)
|
||||
checkItem(t, itr, bz("2"), bz("value_2"))
|
||||
checkNext(t, itr, false)
|
||||
|
||||
// Once invalid...
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Iterator with prefix iterates over everything with same prefix.
|
||||
func TestPrefixIteratorMatches1N(t *testing.T) {
|
||||
for backend := range backends {
|
||||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
|
||||
db, dir := newTempDB(t, backend)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// prefixed
|
||||
db.SetSync(bz("a/1"), bz("value_1"))
|
||||
db.SetSync(bz("a/3"), bz("value_3"))
|
||||
|
||||
// not
|
||||
db.SetSync(bz("b/3"), bz("value_3"))
|
||||
db.SetSync(bz("a-3"), bz("value_3"))
|
||||
db.SetSync(bz("a.3"), bz("value_3"))
|
||||
db.SetSync(bz("abcdefg"), bz("value_3"))
|
||||
itr := IteratePrefix(db, bz("a/"))
|
||||
|
||||
checkValid(t, itr, true)
|
||||
checkItem(t, itr, bz("a/1"), bz("value_1"))
|
||||
checkNext(t, itr, true)
|
||||
checkItem(t, itr, bz("a/3"), bz("value_3"))
|
||||
|
||||
// Bad!
|
||||
checkNext(t, itr, false)
|
||||
|
||||
//Once invalid...
|
||||
checkInvalid(t, itr)
|
||||
})
|
||||
}
|
||||
}
|
@ -2,36 +2,30 @@ package fail
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var callIndexToFail int
|
||||
|
||||
func init() {
|
||||
func envSet() int {
|
||||
callIndexToFailS := os.Getenv("FAIL_TEST_INDEX")
|
||||
|
||||
if callIndexToFailS == "" {
|
||||
callIndexToFail = -1
|
||||
return -1
|
||||
} else {
|
||||
var err error
|
||||
callIndexToFail, err = strconv.Atoi(callIndexToFailS)
|
||||
callIndexToFail, err := strconv.Atoi(callIndexToFailS)
|
||||
if err != nil {
|
||||
callIndexToFail = -1
|
||||
return -1
|
||||
}
|
||||
return callIndexToFail
|
||||
}
|
||||
}
|
||||
|
||||
// Fail when FAIL_TEST_INDEX == callIndex
|
||||
var (
|
||||
callIndex int //indexes Fail calls
|
||||
|
||||
callRandIndex int // indexes a run of FailRand calls
|
||||
callRandIndexToFail = -1 // the callRandIndex to fail on in FailRand
|
||||
)
|
||||
var callIndex int //indexes Fail calls
|
||||
|
||||
func Fail() {
|
||||
callIndexToFail := envSet()
|
||||
if callIndexToFail < 0 {
|
||||
return
|
||||
}
|
||||
@ -43,33 +37,6 @@ func Fail() {
|
||||
callIndex += 1
|
||||
}
|
||||
|
||||
// FailRand should be called n successive times.
|
||||
// It will fail on a random one of those calls
|
||||
// n must be greater than 0
|
||||
func FailRand(n int) {
|
||||
if callIndexToFail < 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if callRandIndexToFail < 0 {
|
||||
// first call in the loop, pick a random index to fail at
|
||||
callRandIndexToFail = rand.Intn(n)
|
||||
callRandIndex = 0
|
||||
}
|
||||
|
||||
if callIndex == callIndexToFail {
|
||||
if callRandIndex == callRandIndexToFail {
|
||||
Exit()
|
||||
}
|
||||
}
|
||||
|
||||
callRandIndex += 1
|
||||
|
||||
if callRandIndex == n {
|
||||
callIndex += 1
|
||||
}
|
||||
}
|
||||
|
||||
func Exit() {
|
||||
fmt.Printf("*** fail-test %d ***\n", callIndex)
|
||||
os.Exit(1)
|
||||
|
@ -1,14 +0,0 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func AssertPanics(t *testing.T, msg string, f func()) {
|
||||
defer func() {
|
||||
if err := recover(); err == nil {
|
||||
t.Errorf("Should have panic'd, but didn't: %v", msg)
|
||||
}
|
||||
}()
|
||||
f()
|
||||
}
|
@ -1,3 +0,0 @@
|
||||
package version
|
||||
|
||||
const Version = "0.9.0"
|
@ -7,10 +7,10 @@ import (
|
||||
|
||||
amino "github.com/tendermint/go-amino"
|
||||
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
lerr "github.com/tendermint/tendermint/lite/errors"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
var _ PersistentProvider = (*DBProvider)(nil)
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
func TestInquirerValidPath(t *testing.T) {
|
||||
|
@ -7,10 +7,10 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
lerr "github.com/tendermint/tendermint/lite/errors"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
// missingProvider doesn't store anything, always a miss.
|
||||
|
@ -2,10 +2,10 @@ package proxy
|
||||
|
||||
import (
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/lite"
|
||||
lclient "github.com/tendermint/tendermint/lite/client"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) {
|
||||
|
@ -26,7 +26,6 @@ import (
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/evidence"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
@ -45,6 +44,7 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
// CustomReactorNamePrefix is a prefix for all custom reactors to prevent
|
||||
|
@ -17,7 +17,6 @@ import (
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/evidence"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@ -28,6 +27,7 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
dbm "github.com/tendermint/tm-cmn/db"
|
||||
)
|
||||
|
||||
func TestNodeStartStop(t *testing.T) {
|
||||
|
@ -13,7 +13,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NetAddress defines information about a peer on the network
|
||||
@ -40,7 +40,7 @@ func IDAddressString(id ID, protocolHostPort string) string {
|
||||
// NewNetAddress returns a new NetAddress using the provided TCP
|
||||
// address. When testing, other net.Addr (except TCP) will result in
|
||||
// using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will
|
||||
// panic.
|
||||
// panic. Panics if ID is invalid.
|
||||
// TODO: socks proxies?
|
||||
func NewNetAddress(id ID, addr net.Addr) *NetAddress {
|
||||
tcpAddr, ok := addr.(*net.TCPAddr)
|
||||
@ -53,6 +53,11 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress {
|
||||
return netAddr
|
||||
}
|
||||
}
|
||||
|
||||
if err := validateID(id); err != nil {
|
||||
panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
|
||||
}
|
||||
|
||||
ip := tcpAddr.IP
|
||||
port := uint16(tcpAddr.Port)
|
||||
na := NewNetAddressIPPort(ip, port)
|
||||
@ -72,18 +77,11 @@ func NewNetAddressString(addr string) (*NetAddress, error) {
|
||||
}
|
||||
|
||||
// get ID
|
||||
idStr := spl[0]
|
||||
idBytes, err := hex.DecodeString(idStr)
|
||||
if err != nil {
|
||||
if err := validateID(ID(spl[0])); err != nil {
|
||||
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
|
||||
}
|
||||
if len(idBytes) != IDByteLength {
|
||||
return nil, ErrNetAddressInvalid{
|
||||
addrWithoutProtocol,
|
||||
fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)}
|
||||
}
|
||||
var id ID
|
||||
id, addrWithoutProtocol = ID(idStr), spl[1]
|
||||
id, addrWithoutProtocol = ID(spl[0]), spl[1]
|
||||
|
||||
// get host and port
|
||||
host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
|
||||
@ -207,22 +205,28 @@ func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
|
||||
|
||||
// Routable returns true if the address is routable.
|
||||
func (na *NetAddress) Routable() bool {
|
||||
if err := na.Valid(); err != nil {
|
||||
return false
|
||||
}
|
||||
// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
|
||||
return na.Valid() && !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
|
||||
return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
|
||||
na.RFC4193() || na.RFC4843() || na.Local())
|
||||
}
|
||||
|
||||
// For IPv4 these are either a 0 or all bits set address. For IPv6 a zero
|
||||
// address or one that matches the RFC3849 documentation address format.
|
||||
func (na *NetAddress) Valid() bool {
|
||||
if string(na.ID) != "" {
|
||||
data, err := hex.DecodeString(string(na.ID))
|
||||
if err != nil || len(data) != IDByteLength {
|
||||
return false
|
||||
}
|
||||
func (na *NetAddress) Valid() error {
|
||||
if err := validateID(na.ID); err != nil {
|
||||
return errors.Wrap(err, "invalid ID")
|
||||
}
|
||||
return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() ||
|
||||
na.IP.Equal(net.IPv4bcast))
|
||||
|
||||
if na.IP == nil {
|
||||
return errors.New("no IP")
|
||||
}
|
||||
if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
|
||||
return errors.New("invalid IP")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasID returns true if the address has an ID.
|
||||
@ -329,3 +333,17 @@ func removeProtocolIfDefined(addr string) string {
|
||||
return addr
|
||||
|
||||
}
|
||||
|
||||
func validateID(id ID) error {
|
||||
if len(id) == 0 {
|
||||
return errors.New("no ID")
|
||||
}
|
||||
idBytes, err := hex.DecodeString(string(id))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(idBytes) != IDByteLength {
|
||||
return fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -11,9 +11,13 @@ import (
|
||||
func TestNewNetAddress(t *testing.T) {
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
|
||||
require.Nil(t, err)
|
||||
addr := NewNetAddress("", tcpAddr)
|
||||
|
||||
assert.Equal(t, "127.0.0.1:8080", addr.String())
|
||||
assert.Panics(t, func() {
|
||||
NewNetAddress("", tcpAddr)
|
||||
})
|
||||
|
||||
addr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr)
|
||||
assert.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", addr.String())
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
|
||||
@ -106,7 +110,12 @@ func TestNetAddressProperties(t *testing.T) {
|
||||
addr, err := NewNetAddressString(tc.addr)
|
||||
require.Nil(t, err)
|
||||
|
||||
assert.Equal(t, tc.valid, addr.Valid())
|
||||
err = addr.Valid()
|
||||
if tc.valid {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.Error(t, err)
|
||||
}
|
||||
assert.Equal(t, tc.local, addr.Local())
|
||||
assert.Equal(t, tc.routable, addr.Routable())
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user