Compare commits

..

44 Commits

Author SHA1 Message Date
Anton Kaliaev
806e8d2077 fix warning 2019-08-19 11:02:52 +04:00
Anton Kaliaev
a31a4c8c2f unexpose CompareVotingPowers 2019-08-19 10:54:05 +04:00
Anton Kaliaev
1b9e0261b6 merge initProvider and makeProvider 2019-08-16 11:43:45 +04:00
Anton Kaliaev
97131a1544 add more docs from ADR-44 2019-08-15 16:22:26 +04:00
Anton Kaliaev
2f96903494 rename BaseVerifier to verifier, remove Verifier interface 2019-08-15 16:01:06 +04:00
Anton Kaliaev
6b38564409 refactor dbprovider 2019-08-15 15:42:22 +04:00
Anton Kaliaev
355834a691 one commit above 2019-08-15 12:10:16 +04:00
Anton Kaliaev
f9061f11e6 refactor multiprovider 2019-08-15 12:09:38 +04:00
Anton Kaliaev
46c3d0baec move concurrent_provider to its own file 2019-08-15 12:09:15 +04:00
Anton Kaliaev
91e69bbdac Merge branch 'master' into jae/verifyingcachineprovider 2019-08-14 16:03:33 +04:00
Zaki Manian
415d9579e1 Add the compare validator powers proposal as discussed with Zarko in the ADR 2019-07-16 19:55:09 -07:00
Anton Kaliaev
88b7c53dcf fix comment and use errors.New instead of fmt.Errorf 2019-07-09 19:38:14 +04:00
Anton Kaliaev
f95b11e07a fix HeightAndHashPresent func 2019-07-09 17:55:55 +04:00
Anton Kaliaev
1f03ceb0ba refactor verifying/provider
- mention unbonding period
- use log.Logger instead of fmt
2019-07-09 17:52:10 +04:00
Anton Kaliaev
45a447aa2f move PrivKeys to internal folder
read https://golang.org/cmd/go/#hdr-Internal_Directories if you want to
know how internal directories work
2019-07-09 17:42:04 +04:00
Anton Kaliaev
8d43cdd846 fix comment 2019-07-09 17:42:04 +04:00
Zaki Manian
7849675827 Update types/validator_set.go
Co-Authored-By: Bot from GolangCI <42910462+golangcibot@users.noreply.github.com>
2019-07-08 15:59:35 -07:00
Anton Kaliaev
ceff0af81d restructure code in verifying/provider 2019-07-08 16:45:22 +04:00
Anton Kaliaev
06720dcd6b add a safety check in ConcurrentProvider#UpdateToHeight 2019-07-08 15:57:47 +04:00
Anton Kaliaev
6810bab382 refactor ConcurrentProvider 2019-07-08 15:42:15 +04:00
Anton Kaliaev
4b2e323728 remove extra comments in lite/commit 2019-07-08 13:32:08 +04:00
Anton Kaliaev
ac330194b9 refactor lite/client package 2019-07-08 13:23:58 +04:00
Zaki Manian
b441a71221 Make ConcurrentProvider public 2019-07-07 22:09:53 -07:00
Zaki Manian
39e589e3c7 Fix VerifyingProvider Nit 2019-07-07 21:55:00 -07:00
Zaki Manian
24cd2eba8e Fixup tests 2019-07-07 21:47:39 -07:00
Zaki Manian
2872df6634 Remove verify future commit as uncessessary 2019-07-06 20:21:23 -07:00
Zaki Manian
6746befb44 Add linear non bisceting verifier 2019-07-06 20:20:37 -07:00
Zaki Manian
fb3663cfb1 verifyAndSave now internally checks if the commit is validate before proceeding.
Fix a comment for clarity
2019-07-06 19:05:10 -07:00
Zaki Manian
b97a8b69ed Update lite/verifying/provider.go
Co-Authored-By: Ismail Khoffi <Ismail.Khoffi@gmail.com>
2019-07-06 11:48:48 -07:00
Marko Baricevic
0be87cdcba privkey fix 2019-07-06 09:49:39 +02:00
Marko Baricevic
49bea520cf change trust to trusted 2019-07-06 09:47:47 +02:00
Marko Baricevic
3acc6e60e8 import and type calls fix 2019-07-06 09:46:01 +02:00
Zaki Manian
0a145b83be Update lite/verifying/provider.go
Co-Authored-By: Ismail Khoffi <Ismail.Khoffi@gmail.com>
2019-07-05 20:52:21 -07:00
Zaki Manian
d15f0f8df0 Fix some CI bot nits 2019-07-05 20:44:00 -07:00
Zaki Manian
02c467237e Fix DB backed name change 2019-07-05 20:23:38 -07:00
Zaki Manian
165a3d15fb Fix conflicts with upsteam changes 2019-07-05 20:23:23 -07:00
Zaki Manian
ce9cde0888 Apply suggestions from code review
Some typo fixes from review

Co-Authored-By: zmanian <zaki@manian.org>
2019-05-02 08:41:05 -07:00
Zaki Manian
7635b1ed7e Handle error 2019-05-01 17:32:49 -07:00
Jack Zampolin
312a36a0ab Compiling 2019-05-01 14:09:24 -07:00
Zaki Manian
59d3d40dbf Added the Verify method to implement Provider 2019-04-30 10:01:36 -07:00
Jack Zampolin
0a87ddef96 Abstract out provider init logic and logger 2019-04-26 13:38:39 -04:00
Jack Zampolin
236cdf87aa NewProvider function complete 2019-04-25 15:11:30 -04:00
Zaki Manian
88b69a956f Improve getTargetCommit with better business logic 2019-04-24 17:58:19 -04:00
jaekwon
1a86c869e8 WIP 2019-04-17 11:27:37 -07:00
50 changed files with 1158 additions and 3802 deletions


@@ -216,11 +216,11 @@ jobs:
name: Trigger website build
command: |
curl --silent \
--show-error \
-X POST \
--header "Content-Type: application/json" \
-d "{\"branch\": \"$CIRCLE_BRANCH\"}" \
"https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json
--show-error \
-X POST \
--header "Content-Type: application/json" \
-d "{\"branch\": \"$CIRCLE_BRANCH\"}" \
"https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json
RESULT=`jq -r '.status' response.json`
MESSAGE=`jq -r '.message' response.json`
@@ -359,35 +359,6 @@ jobs:
- store_artifacts:
path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz
# Test RPC implementation against the swagger documented specs
contract_tests:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: Test RPC endpoints against swagger documentation
command: |
set -x
export PATH=~/.local/bin:$PATH
# install node and dredd
./scripts/get_nodejs.sh
# build the binaries with a proper version of Go
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
# This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
go get github.com/snikch/goodman/cmd/goodman
make contract-tests
workflows:
version: 2
test-suite:
@@ -426,10 +397,6 @@ workflows:
only:
- master
- /v[0-9]+\.[0-9]+/
- contract_tests:
requires:
- setup_dependencies
release:
jobs:
- prepare_build


@@ -1,28 +1,5 @@
# Changelog
## v0.32.3
*August 27, 2019*
Special thanks to external contributors on this release:
@gchaincl, @bluele
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
- [mempool] [\#3877](https://github.com/tendermint/tendermint/pull/3877) Make `max_tx_bytes` configurable instead of `max_msg_bytes` (@bluele)
- [privval] [\#3370](https://github.com/tendermint/tendermint/issues/3370) Refactor and simplify validator/kms connection handling. Please refer to [this comment](https://github.com/tendermint/tendermint/pull/3370#issue-257360971) for details
- [rpc] [\#3880](https://github.com/tendermint/tendermint/issues/3880) Document endpoints with `swagger`, introduce contract tests of implementation against documentation
### BUG FIXES:
- [config] [\#3868](https://github.com/tendermint/tendermint/issues/3868) Move misplaced `max_msg_bytes` into mempool section (@bluele)
- [rpc] [\#3910](https://github.com/tendermint/tendermint/pull/3910) Fix DATA RACE in HTTP client (@gchaincl)
- [store] [\#3893](https://github.com/tendermint/tendermint/issues/3893) Fix "Unregistered interface types.Evidence" panic
## v0.32.2
*July 31, 2019*
@@ -40,20 +17,20 @@ program](https://hackerone.com/tendermint).
### FEATURES:
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
Warning: beware of accidental name clashes. Here is the list of existing
reactors: MEMPOOL, BLOCKCHAIN, CONSENSUS, EVIDENCE, PEX.
- [p2p] [\#3834](https://github.com/tendermint/tendermint/issues/3834) Do not write 'Couldn't connect to any seeds' error log if there are no seeds in config file
- [rpc] [\#3818](https://github.com/tendermint/tendermint/issues/3818) Make `max_body_bytes` and `max_header_bytes` configurable(@bluele)
- [rpc] [\#2252](https://github.com/tendermint/tendermint/issues/2252) Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
### IMPROVEMENTS:
- [abci] [\#3809](https://github.com/tendermint/tendermint/issues/3809) Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
- [rpc] [\#2252](https://github.com/tendermint/tendermint/issues/2252) Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence
- [p2p] [\#3664](https://github.com/tendermint/tendermint/issues/3664) p2p/conn: reuse buffer when write/read from secret connection(@guagualvcha)
- [p2p] [\#3834](https://github.com/tendermint/tendermint/issues/3834) Do not write 'Couldn't connect to any seeds' error log if there are no seeds in config file
- [rpc] [\#3076](https://github.com/tendermint/tendermint/issues/3076) Improve transaction search performance
### BUG FIXES:


@@ -1,4 +1,4 @@
## v0.32.4
## v0.32.3
\*\*
@@ -19,4 +19,11 @@ program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [privval] \#3370 Refactors and simplifies validator/kms connection handling. Please refer to https://github.com/tendermint/tendermint/pull/3370#issue-257360971
- [consensus] \#3839 Reduce "Error attempting to add vote" message severity (Error -> Info)
- [mempool] \#3877 Make `max_tx_bytes` configurable instead of `max_msg_bytes`
### BUG FIXES:
- [config] \#3868 move misplaced `max_msg_bytes` into mempool section
- [store] \#3893 register block amino, not just crypto


@@ -2,7 +2,7 @@
Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.
Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`.
Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`.
See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
Before making a pull request, please open an issue describing the
@@ -21,16 +21,16 @@ Please make sure to use `gofmt` before every commit - the easiest way to do this
Please note that Go requires code to live under absolute paths, which complicates forking.
While my fork lives at `https://github.com/ebuchman/tendermint`,
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
Instead, we use `git remote` to add the fork as a new remote for the original repo,
`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there.
`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there.
For instance, to create a fork and work on a branch of it, I would:
- Create the fork on github, using the fork button.
- Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
- `git remote rename origin upstream`
- `git remote add origin git@github.com:ebuchman/basecoin.git`
* Create the fork on github, using the fork button.
* Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
* `git remote rename origin upstream`
* `git remote add origin git@github.com:ebuchman/basecoin.git`
Now `origin` refers to my fork and `upstream` refers to the tendermint version.
So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
@@ -38,8 +38,8 @@ Of course, replace `ebuchman` with your git handle.
To pull in updates from the origin repo, run
- `git fetch upstream`
- `git rebase upstream/master` (or whatever branch you want)
* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)
## Dependencies
@@ -113,7 +113,7 @@ removed from the header in rpc responses as well.
## Branching Model and Release
The main development branch is master.
The main development branch is master.
Every release is maintained in a release branch named `vX.Y.Z`.
@@ -140,35 +140,36 @@ easy to reference the pull request where a change was introduced.
#### Major Release
1. start on `master`
1. start on `master`
2. run integration tests (see `test_integrations` in Makefile)
3. prepare release in a pull request against `master` (to be squash merged):
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- bump versions
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash
./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- bump versions
4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`)
5. merge back to master (don't squash merge!)
#### Minor Release
If there were no breaking changes and you need to create a release nonetheless,
the procedure is almost exactly like with a new release above.
If there were no breaking changes and you need to create a release nonetheless,
the procedure is almost exactly like with a new release above.
The only difference is that in the end you create a pull request against the existing `X.X` branch.
The branch name should match the release number you want to create.
Merging this PR will trigger the next release.
For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag,
Merging this PR will trigger the next release.
For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag,
the patch version will be incremented and the created release will be v0.34.1.
#### Backport Release
1. start from the existing release branch you want to backport changes to (e.g. v0.30)
Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
3. steps 2 and 3 from [Major Release](#major-release)
4. push changes to release/vX.X.X branch
@@ -182,16 +183,3 @@ If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
### RPC Testing
If you contribute to the RPC endpoints it's important to document your changes in the [Swagger file](./docs/spec/rpc/swagger.yaml)
To test your changes you should install `nodejs` and run:
```bash
npm i -g dredd
make build-linux build-contract-tests-hooks
make contract-tests
```
This command will popup a network and check every endpoint against what has been documented


@@ -28,7 +28,7 @@ build_c:
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/
build_race:
CGO_ENABLED=1 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
@@ -308,23 +308,7 @@ sentry-stop:
build-slate:
bash scripts/slate.sh
# Build hooks for dredd, to skip or add information on some steps
build-contract-tests-hooks:
ifeq ($(OS),Windows_NT)
go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests
else
go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests ./cmd/contract_tests
endif
# Run a nodejs tool to test endpoints against a localnet
# The command takes care of starting and stopping the network
# prerequisits: build-contract-tests-hooks build-linux
# the two build commands were not added to let this command run from generic containers or machines.
# The binaries should be built beforehand
contract-tests:
dredd
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint build-contract-tests-hooks contract-tests
.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint

ROADMAP.md Normal file

@@ -0,0 +1,23 @@
# Roadmap
BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence
FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limitting to the RPC
IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
- cache block parts for faster agreement after round changes
- propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto compiled serialization/deserialization code instead of go-wire reflection
BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness


@@ -40,7 +40,7 @@ func main() {
}
if writeImportTime && !wroteImport {
wroteImport = true
fmt.Fprintf(outFile, "import \"github.com/tendermint/go-amino/data\"\n")
fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n")
}
if gotPackageLine {


@@ -1,34 +0,0 @@
package main
import (
"fmt"
"strings"
"github.com/snikch/goodman/hooks"
"github.com/snikch/goodman/transaction"
)
func main() {
// This must be compiled beforehand and given to dredd as parameter, in the meantime the server should be running
h := hooks.NewHooks()
server := hooks.NewServer(hooks.NewHooksRunner(h))
h.BeforeAll(func(t []*transaction.Transaction) {
fmt.Println(t[0].Name)
})
h.BeforeEach(func(t *transaction.Transaction) {
if strings.HasPrefix(t.Name, "Tx") ||
// We need a proper example of evidence to broadcast
strings.HasPrefix(t.Name, "Info > /broadcast_evidence") ||
// We need a proper example of path and data
strings.HasPrefix(t.Name, "ABCI > /abci_query") ||
// We need to find a way to make a transaction before starting the tests,
// that hash should replace the dummy one in hte swagger file
strings.HasPrefix(t.Name, "Info > /tx") {
t.Skip = true
fmt.Printf("%s Has been skipped\n", t.Name)
}
})
server.Serve()
defer server.Listener.Close()
fmt.Print("FINE")
}


@@ -9,6 +9,7 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/lite/proxy"
"github.com/tendermint/tendermint/lite/verifying"
rpcclient "github.com/tendermint/tendermint/rpc/client"
)
@@ -78,10 +79,10 @@ func runProxy(cmd *cobra.Command, args []string) error {
logger.Info("Connecting to source HTTP client...")
node := rpcclient.NewHTTP(nodeAddr, "/websocket")
logger.Info("Constructing Verifier...")
cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize)
logger.Info("Constructing verifying provider...")
cert, err := verifying.NewProvider(chainID, home, node, logger, cacheSize, verifying.TrustOptions{})
if err != nil {
return errors.Wrap(err, "constructing Verifier")
return errors.Wrap(err, "constructing verifying provider")
}
cert.SetLogger(logger)
sc := proxy.SecureClient(node, cert)


@@ -1672,7 +1672,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
}
default:
panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-amino should prevent this.
panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
}
return


@@ -1,141 +0,0 @@
# ADR 044: Lite Client with Weak Subjectivity
## Changelog
* 13-07-2019: Initial draft
* 14-08-2019: Address cwgoes comments
## Context
The concept of light clients was introduced in the Bitcoin white paper. It
describes a watcher of distributed consensus process that only validates the
consensus algorithm and not the state machine transactions within.
Tendermint light clients allow bandwidth & compute-constrained devices, such as smartphones, low-power embedded chips, or other blockchains to
efficiently verify the consensus of a Tendermint blockchain. This forms the
basis of safe and efficient state synchronization for new network nodes and
inter-blockchain communication (where a light client of one Tendermint instance
runs in another chain's state machine).
In a network that is expected to reliably punish validators for misbehavior
by slashing bonded stake and where the validator set changes
infrequently, clients can take advantage of this assumption to safely
synchronize a lite client without downloading the intervening headers.
Light clients (and full nodes) operating in the Proof Of Stake context need a
trusted block height from a trusted source that is no older than 1 unbonding
window plus a configurable evidence submission synchrony bound. This is called “weak subjectivity”.
Weak subjectivity is required in Proof of Stake blockchains because it is
costless for an attacker to buy up voting keys that are no longer bonded and
fork the network at some point in its prior history. See Vitaliks post at
[Proof of Stake: How I Learned to Love Weak
Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/).
Currently, Tendermint provides a lite client implementation in the
[lite](https://github.com/tendermint/tendermint/tree/master/lite) package. This
lite client implements a bisection algorithm that tries to use a binary search
to find the minimum number of block headers where the validator set voting
power changes are less than < 1/3rd. This interface does not support weak
subjectivity at this time. The Cosmos SDK also does not support counterfactual
slashing, nor does the lite client have any capacity to report evidence making
these systems *theoretically unsafe*.
NOTE: Tendermint provides a somewhat different (stronger) light client model
than Bitcoin under eclipse, since the eclipsing node(s) can only fool the light
client if they have two-thirds of the private keys from the last root-of-trust.
## Decision
### The Weak Subjectivity Interface
Add the weak subjectivity interface for when a new light client connects to the
network or when a light client that has been offline for longer than the
unbonding period connects to the network. Specifically, the node needs to
initialize the following structure before syncing from user input:
```
type TrustOptions struct {
// Required: only trust commits up to this old.
// Should be equal to the unbonding period minus some delta for evidence reporting.
TrustPeriod time.Duration `json:"trust-period"`
// Option 1: TrustHeight and TrustHash can both be provided
// to force the trusting of a particular height and hash.
// If the latest trusted height/hash is more recent, then this option is
// ignored.
TrustHeight int64 `json:"trust-height"`
TrustHash []byte `json:"trust-hash"`
// Option 2: Callback can be set to implement a confirmation
// step if the trust store is uninitialized, or expired.
Callback func(height int64, hash []byte) error
}
```
The expectation is the user will get this information from a trusted source
like a validator, a friend, or a secure website. A more user friendly
solution with trust tradeoffs is that we establish an https based protocol with
a default end point that populates this information. Also an on-chain registry
of roots-of-trust (e.g. on the Cosmos Hub) seems likely in the future.
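
For illustration, here is a minimal, self-contained sketch of how a caller might populate this structure. The concrete values, the local copy of the struct, and the helper code are assumptions made for the example, not part of the ADR:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"time"
)

// TrustOptions mirrors the struct shown above; it is redefined here only so
// that the sketch compiles on its own.
type TrustOptions struct {
	TrustPeriod time.Duration
	TrustHeight int64
	TrustHash   []byte
	Callback    func(height int64, hash []byte) error
}

func main() {
	// Placeholder root of trust; a real client obtains the height and hash
	// out of band from a trusted source (a validator, a friend, a secure website).
	hash, _ := hex.DecodeString("aabbccddeeff")

	opts := TrustOptions{
		// Roughly the unbonding period minus a delta for evidence reporting.
		TrustPeriod: 21 * 24 * time.Hour,
		TrustHeight: 100,
		TrustHash:   hash,
		// Invoked to confirm a root of trust when the trust store is
		// uninitialized or expired.
		Callback: func(height int64, hash []byte) error {
			fmt.Printf("confirm trusting height %d with hash %X\n", height, hash)
			return nil
		},
	}
	fmt.Printf("%+v\n", opts)
}
```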
### Linear Verification
The linear verification algorithm requires downloading all headers
between the `TrustHeight` and the `LatestHeight`. The lite client downloads the
full header for the provided `TrustHeight` and then proceeds to download `N+1`
headers and applies the [Tendermint validation
rules](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#validation)
to each block.
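
As a rough, self-contained sketch of that control flow (the `Header` type and the helpers are stand-ins invented for the example, not the actual lite package API):

```go
package main

import "fmt"

// Header and the helpers below are stand-ins for this sketch only.
type Header struct{ Height int64 }

// fetchHeader stands in for downloading a header from a full node.
func fetchHeader(height int64) (Header, error) { return Header{Height: height}, nil }

// validateAdjacent stands in for applying the Tendermint validation rules to
// each consecutive header.
func validateAdjacent(trusted, next Header) error { return nil }

// verifyLinear walks every header from the trusted height up to the target,
// validating one block at a time.
func verifyLinear(trusted Header, targetHeight int64) (Header, error) {
	for h := trusted.Height + 1; h <= targetHeight; h++ {
		next, err := fetchHeader(h)
		if err != nil {
			return trusted, err
		}
		if err := validateAdjacent(trusted, next); err != nil {
			return trusted, err
		}
		trusted = next
	}
	return trusted, nil
}

func main() {
	latest, err := verifyLinear(Header{Height: 100}, 110)
	fmt.Println(latest, err)
}
```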
### Bisecting Verification
Bisecting Verification is a more bandwidth and compute intensive mechanism that
in the most optimistic case requires a light client to only download two block
headers to come into synchronization.
The bisection algorithm proceeds in the following fashion. The client downloads
and verifies the full block header for `TrustHeight` and then fetches
`LatestHeight` blocker header. The client then verifies the `LatestHeight`
header. Finally the client attempts to verify the `LatestHeight` header with
voting powers taken from `NextValidatorSet` in the `TrustHeight` header. This
verification will succeed if the validators from `TrustHeight` still have > 2/3
+1 of voting power in the `LatestHeight`. If this succeeds, the client is fully
synchronized. If this fails, then following Bisection Algorithm should be
executed.
The Client tries to download the block at the mid-point block between
`LatestHeight` and `TrustHeight` and attempts that same algorithm as above
using `MidPointHeight` instead of `LatestHeight` and a different threshold -
1/3 +1 of voting power for *non-adjacent headers*. In the case the of failure,
recursively perform the `MidPoint` verification until success then start over
with an updated `NextValidatorSet` and `TrustHeight`.
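
A compact sketch of that recursion, again with invented stand-in types and helpers rather than the lite package's actual API:

```go
package main

import (
	"errors"
	"fmt"
)

// Header and the helpers below are stand-ins for this sketch only.
type Header struct{ Height int64 }

func fetchHeader(height int64) (Header, error) { return Header{Height: height}, nil }

// signedFraction stands in for "fraction of the voting power trusted at
// `trusted` that signed `h`"; a real client computes this from validator sets.
func signedFraction(trusted, h Header) float64 { return 1 }

// verifyBisecting tries the optimistic one-step verification first and falls
// back to bisection, using > 2/3 for the target and > 1/3 for non-adjacent
// intermediate headers, as described above.
func verifyBisecting(trusted, target Header, threshold float64) (Header, error) {
	if signedFraction(trusted, target) > threshold {
		return target, nil // target can now be trusted
	}
	if target.Height <= trusted.Height+1 {
		return trusted, errors.New("adjacent header failed verification")
	}
	mid, err := fetchHeader((trusted.Height + target.Height) / 2)
	if err != nil {
		return trusted, err
	}
	// Verify the midpoint against the current trust with the relaxed
	// non-adjacent threshold, then start over from the midpoint.
	if trusted, err = verifyBisecting(trusted, mid, 1.0/3.0); err != nil {
		return trusted, err
	}
	return verifyBisecting(trusted, target, 2.0/3.0)
}

func main() {
	latest, err := verifyBisecting(Header{Height: 100}, Header{Height: 200}, 2.0/3.0)
	fmt.Println(latest, err)
}
```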
If the client encounters a forged header, it should submit the header along
with some other intermediate headers as the evidence of misbehavior to other
full nodes. After that, it can retry the bisection using another full node. An
optimal client will cache trusted headers from the previous run to minimize
network usage.
---
Check out the formal specification
[here](https://github.com/tendermint/tendermint/blob/master/docs/spec/consensus/light-client.md).
## Status
Accepted.
## Consequences
### Positive
* light client which is safe to use (it can go offline, but not for too long)
### Negative
* complexity of bisection
### Neutral
* social consensus can be prone to errors (for cases where a new light client
joins a network or it has been offline for too long)


@@ -66,7 +66,7 @@ After `Commit`, CheckTx is run again on all transactions that remain in the
node's local mempool after filtering those included in the block. To prevent the
mempool from rechecking all transactions every time a block is committed, set
the configuration option `mempool.recheck=false`. As of Tendermint v0.32.1,
an additional `Type` parameter is made available to the CheckTx function that
an additional `Type` parameter is made available to the CheckTx function that
indicates whether an incoming transaction is new (`CheckTxType_New`), or a
recheck (`CheckTxType_Recheck`).
@@ -211,7 +211,7 @@ message PubKey {
The `pub_key` currently supports only one type:
- `type = "ed25519"` and `data = <raw 32-byte public key>`
- `type = "ed25519" and`data = <raw 32-byte public key>`
The `power` is the new voting power for the validator, with the
following rules:


@@ -61,7 +61,7 @@ func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
for the channel with the given id byte `chID`. The message `msg` is serialized
using the `tendermint/go-amino` submodule's `WriteBinary()` reflection routine.
using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
`TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel
with the given id byte chID if the queue is not full; otherwise it returns false immediately.


@@ -13,13 +13,13 @@ type TxMessage struct {
}
```
TxMessage is go-amino encoded and prepended with `0x1` as a
"type byte". This is followed by a go-amino encoded byte-slice.
TxMessage is go-wire encoded and prepended with `0x1` as a
"type byte". This is followed by a go-wire encoded byte-slice.
Prefix of 40=0x28 byte tx is: `0x010128...` followed by
the actual 40-byte tx. Prefix of 350=0x015e byte tx is:
`0x0102015e...` followed by the actual 350 byte tx.
(Please see the [go-amino repo](https://github.com/tendermint/go-amino#an-interface-example) for more information)
(Please see the [go-wire repo](https://github.com/tendermint/go-wire#an-interface-example) for more information)
## RPC Messages


@@ -1,25 +0,0 @@
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Tendermint RPC</title>
<link rel="stylesheet" type="text/css" href="//unpkg.com/swagger-ui-dist@3/swagger-ui.css" >
<link rel="icon" type="image/png" href="//unpkg.com/swagger-ui-dist@3/favicon-16x16.png"/>
<script src="//unpkg.com/swagger-ui-dist@3/swagger-ui-bundle.js"></script>
</head>
<body>
<div id="swagger-ui"></div>
<script>
window.onload = function() {
window.ui = SwaggerUIBundle({
url: "./swagger.yaml",
dom_id: '#swagger-ui',
deepLinking: true,
layout: "BaseLayout"
});
}
</script>
</body>
</html>

File diff suppressed because it is too large.


@@ -1,33 +0,0 @@
color: true
dry-run: null
hookfiles: build/contract_tests
language: go
require: null
server: make localnet-start
server-wait: 30
init: false
custom: {}
names: false
only: []
reporter: []
output: []
header: []
sorted: false
user: null
inline-errors: false
details: false
method: [GET]
loglevel: warning
path: []
hooks-worker-timeout: 5000
hooks-worker-connect-timeout: 1500
hooks-worker-connect-retry: 500
hooks-worker-after-connect-wait: 100
hooks-worker-term-timeout: 5000
hooks-worker-term-retry: 500
hooks-worker-handler-host: 127.0.0.1
hooks-worker-handler-port: 61321
config: ./dredd.yml
# This path accepts no variables
blueprint: ./docs/spec/rpc/swagger.yaml
endpoint: 'http://127.0.0.1:26657/'

go.mod

@@ -14,8 +14,10 @@ require (
github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.3.2
github.com/google/gofuzz v1.0.0 // indirect
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect
github.com/gorilla/websocket v1.2.0
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect
github.com/libp2p/go-buffer-pool v0.0.1
@@ -30,7 +32,6 @@ require (
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165
github.com/rs/cors v1.6.0
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/afero v1.1.2 // indirect
github.com/spf13/cast v1.3.0 // indirect
github.com/spf13/cobra v0.0.1
@@ -40,6 +41,7 @@ require (
github.com/stretchr/testify v1.3.0
github.com/tendermint/go-amino v0.14.1
github.com/tendermint/tm-db v0.1.1
golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c // indirect
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
google.golang.org/grpc v1.22.0

go.sum

@@ -46,12 +46,16 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 h1:XTnP8fJpa4Kvpw2qARB4KS9izqxPS0Sd92cDlY3uk+w=
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ=
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -94,8 +98,6 @@ github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFE
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
@@ -117,10 +119,14 @@ github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk=
github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso=
github.com/tendermint/tm-db v0.0.0-20190731085305-94017c88bf1d h1:yCHL2COLGLNfb4sA9AlzIHpapb8UATvAQyJulS6Eg6Q=
github.com/tendermint/tm-db v0.0.0-20190731085305-94017c88bf1d/go.mod h1:0cPKWu2Mou3IlxecH+MEUSYc1Ch537alLe6CpFrKzgw=
github.com/tendermint/tm-db v0.1.1 h1:G3Xezy3sOk9+ekhjZ/kjArYIs1SmwV+1OUgNkj7RgV0=
github.com/tendermint/tm-db v0.1.1/go.mod h1:0cPKWu2Mou3IlxecH+MEUSYc1Ch537alLe6CpFrKzgw=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c h1:Rx/HTKi09myZ25t1SOlDHmHOy/mKxNAcu0hP1oPX9qM=
golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -157,3 +163,4 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=


@@ -1,7 +1,9 @@
/*
Package client defines a provider that uses a rpcclient
to get information, which is used to get new headers
and validators directly from a Tendermint client.
Package client defines a provider that uses an RPC client (or SignStatusClient
more generally) to get information like new headers and validators directly
from a Tendermint node.
Use either NewProvider or NewHTTPProvider to construct one.
*/
package client
@@ -28,7 +30,8 @@ type provider struct {
client SignStatusClient
}
// NewProvider implements Provider (but not PersistentProvider).
// NewProvider creates a lite.Provider using the given chain ID and
// SignStatusClient.
func NewProvider(chainID string, client SignStatusClient) lite.Provider {
return &provider{
logger: log.NewNopLogger(),
@@ -37,87 +40,59 @@ func NewProvider(chainID string, client SignStatusClient) lite.Provider {
}
}
// NewHTTPProvider can connect to a tendermint json-rpc endpoint
// at the given url, and uses that as a read-only provider.
// NewHTTPProvider creates a lite.Provider, which is using the rpcclient.HTTP
// client under the hood.
func NewHTTPProvider(chainID, remote string) lite.Provider {
return NewProvider(chainID, rpcclient.NewHTTP(remote, "/websocket"))
}
// Implements Provider.
// SetLogger implements lite.Provider.
func (p *provider) SetLogger(logger log.Logger) {
logger = logger.With("module", "lite/client")
p.logger = logger
}
// StatusClient returns the internal client as a StatusClient
func (p *provider) StatusClient() rpcclient.StatusClient {
return p.client
}
// LatestFullCommit implements Provider.
// LatestFullCommit implements lite.Provider.
func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) {
if chainID != p.chainID {
err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID)
return
return fc, fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID)
}
if maxHeight != 0 && maxHeight < minHeight {
err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v",
return fc, fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v",
minHeight, maxHeight)
return
}
commit, err := p.fetchLatestCommit(minHeight, maxHeight)
if err != nil {
return
return fc, err
}
fc, err = p.fillFullCommit(commit.SignedHeader)
return
return p.fillFullCommit(commit.SignedHeader)
}
// fetchLatestCommit fetches the latest commit from the client.
func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) {
status, err := p.client.Status()
if err != nil {
return nil, err
}
if status.SyncInfo.LatestBlockHeight < minHeight {
err = fmt.Errorf("provider is at %v but require minHeight=%v",
return nil, fmt.Errorf("provider is at %d but require minHeight=%d",
status.SyncInfo.LatestBlockHeight, minHeight)
return nil, err
}
if maxHeight == 0 {
maxHeight = status.SyncInfo.LatestBlockHeight
} else if status.SyncInfo.LatestBlockHeight < maxHeight {
maxHeight = status.SyncInfo.LatestBlockHeight
}
return p.client.Commit(&maxHeight)
}
// Implements Provider.
func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
return p.getValidatorSet(chainID, height)
}
func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
if chainID != p.chainID {
err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID)
return
}
if height < 1 {
err = fmt.Errorf("expected height >= 1, got height %v", height)
return
}
res, err := p.client.Validators(&height)
if err != nil {
// TODO pass through other types of errors.
return nil, lerr.ErrUnknownValidators(chainID, height)
}
valset = types.NewValidatorSet(res.Validators)
return
}
// This does no validation.
func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) {
// Get the validators.
valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height)
if err != nil {
@@ -132,3 +107,26 @@ func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.Full
return lite.NewFullCommit(signedHeader, valset, nextValset), nil
}
// ValidatorSet implements lite.Provider.
func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
return p.getValidatorSet(chainID, height)
}
func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
if chainID != p.chainID {
return nil, fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID)
}
if height < 1 {
return nil, fmt.Errorf("expected height >= 1, got height %d", height)
}
res, err := p.client.Validators(&height)
if err != nil {
// TODO pass through other types of errors.
return nil, lerr.ErrUnknownValidators(chainID, height)
}
return types.NewValidatorSet(res.Validators), nil
}


@@ -8,9 +8,10 @@ import (
"github.com/tendermint/tendermint/types"
)
// FullCommit contains a SignedHeader (the block header and a commit that signs it),
// the validator set which signed the commit, and the next validator set. The
// next validator set (which is proven from the block header) allows us to
// FullCommit contains a SignedHeader (the block's header and a commit that
// signs it), the validator set which signed the commit, and the next validator
// set.
// The next validator set (which is proven from the block header) allows us to
// revert to block-by-block updating of lite Verifier's latest validator set,
// even in the face of arbitrarily large power changes.
type FullCommit struct {
@@ -28,46 +29,41 @@ func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.Va
}
}
// Validate the components and check for consistency.
// This also checks to make sure that Validators actually
// signed the SignedHeader.Commit.
// If > 2/3 did not sign the Commit from fc.Validators, it
// is not a valid commit!
// ValidateFull validates the components and ensures consistency.
// It also checks that Validators actually signed the SignedHeader.Commit.
// If > 2/3 did not sign the Commit from fc.Validators, it is not a valid
// commit!
func (fc FullCommit) ValidateFull(chainID string) error {
// Ensure that Validators exists and matches the header.
if fc.Validators.Size() == 0 {
return errors.New("need FullCommit.Validators")
return errors.New("empty Validators")
}
if !bytes.Equal(
fc.SignedHeader.ValidatorsHash,
fc.Validators.Hash()) {
return fmt.Errorf("header has vhash %X but valset hash is %X",
if !bytes.Equal(fc.SignedHeader.ValidatorsHash, fc.Validators.Hash()) {
return fmt.Errorf("header has ValidatorsHash %X, but valset hash is %X",
fc.SignedHeader.ValidatorsHash,
fc.Validators.Hash(),
)
}
// Ensure that NextValidators exists and matches the header.
if fc.NextValidators.Size() == 0 {
return errors.New("need FullCommit.NextValidators")
return errors.New("empty NextValidators")
}
if !bytes.Equal(
fc.SignedHeader.NextValidatorsHash,
fc.NextValidators.Hash()) {
return fmt.Errorf("header has next vhash %X but next valset hash is %X",
if !bytes.Equal(fc.SignedHeader.NextValidatorsHash, fc.NextValidators.Hash()) {
return fmt.Errorf("header has next ValidatorsHash %X, but next valset hash is %X",
fc.SignedHeader.NextValidatorsHash,
fc.NextValidators.Hash(),
)
}
// Validate the header.
err := fc.SignedHeader.ValidateBasic(chainID)
if err != nil {
return err
}
// Validate the signatures on the commit.
hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit
return fc.Validators.VerifyCommit(
hdr.ChainID, cmt.BlockID,
hdr.Height, cmt)
return fc.Validators.VerifyCommit(hdr.ChainID, cmt.BlockID, hdr.Height, cmt)
}
// Height returns the height of the header.
@@ -75,6 +71,7 @@ func (fc FullCommit) Height() int64 {
if fc.SignedHeader.Header == nil {
panic("should not happen")
}
return fc.SignedHeader.Height
}
@@ -83,5 +80,6 @@ func (fc FullCommit) ChainID() string {
if fc.SignedHeader.Header == nil {
panic("should not happen")
}
return fc.SignedHeader.ChainID
}


@@ -0,0 +1,98 @@
package lite
import (
"fmt"
"sync"
)
// ConcurrentProvider is a provider which is safe to use by multiple threads.
type ConcurrentProvider struct {
UpdatingProvider
// pending map to synchronize concurrent verification requests
mtx sync.Mutex
pendingVerifications map[pendingKey]*pendingResult
}
// convenience to create the key for the lookup map
type pendingKey struct {
chainID string
height int64
}
// used to cache the result from underlying UpdatingProvider.
type pendingResult struct {
wait chan struct{}
err error // cached result.
}
// NewConcurrentProvider creates a ConcurrentProvider using the given
// UpdatingProvider.
func NewConcurrentProvider(up UpdatingProvider) *ConcurrentProvider {
return &ConcurrentProvider{
UpdatingProvider: up,
pendingVerifications: make(map[pendingKey]*pendingResult),
}
}
// Returns the unique pending request for all identical calls to
// joinConcurrency(chainID,height), and returns true for isFirstCall only for
// the first call, which should call the returned callback w/ results if any.
//
// NOTE: The callback must be called, otherwise there will be memory leaks.
//
// Other subsequent calls should just return pr.err.
// This is a separate function, primarily to make mtx unlocking more
// obviously safe via defer.
func (cp *ConcurrentProvider) joinConcurrency(chainID string, height int64) (pr *pendingResult, isFirstCall bool, callback func(error)) {
cp.mtx.Lock()
defer cp.mtx.Unlock()
pk := pendingKey{chainID, height}
if pr = cp.pendingVerifications[pk]; pr != nil {
<-pr.wait
return pr, false, nil
}
pr = &pendingResult{wait: make(chan struct{}), err: nil}
cp.pendingVerifications[pk] = pr
// The caller must call this, otherwise there will be memory leaks.
return pr, true, func(err error) {
// NOTE: other result parameters can be added here.
pr.err = err
// *After* setting the results, *then* call close(pr.wait).
close(pr.wait)
cp.mtx.Lock()
delete(cp.pendingVerifications, pk)
cp.mtx.Unlock()
}
}
// UpdateToHeight implements UpdatingProvider.
func (cp *ConcurrentProvider) UpdateToHeight(chainID string, height int64) error {
// Performs synchronization for multi-threads verifications at the same height.
pr, isFirstCall, callback := cp.joinConcurrency(chainID, height)
if isFirstCall {
var err error
// Use a defer in case UpdateToHeight itself fails.
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("Recovered from panic: %v", r)
}
callback(err)
}()
err = cp.UpdatingProvider.UpdateToHeight(chainID, height)
return err
}
// Is not the first call, so return the error from previous concurrent calls.
if callback != nil {
panic("expected callback to be nil")
}
return pr.err
}
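
The joinConcurrency pattern above is essentially a per-key "single flight": the first caller for a given (chainID, height) performs the verification, and later concurrent callers wait and reuse its result. A stripped-down, self-contained illustration of the same idea (all names here are invented for the sketch, not part of the lite package):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// singleFlight deduplicates concurrent calls for the same key: the first
// caller does the work, later concurrent callers block and reuse its result.
type singleFlight struct {
	mtx     sync.Mutex
	pending map[string]*call
}

type call struct {
	done chan struct{}
	err  error
}

func (sf *singleFlight) Do(key string, fn func() error) error {
	sf.mtx.Lock()
	if c, ok := sf.pending[key]; ok {
		sf.mtx.Unlock()
		<-c.done // wait for the first caller to finish
		return c.err
	}
	c := &call{done: make(chan struct{})}
	sf.pending[key] = c
	sf.mtx.Unlock()

	c.err = fn()
	close(c.done)

	sf.mtx.Lock()
	delete(sf.pending, key)
	sf.mtx.Unlock()
	return c.err
}

func main() {
	sf := &singleFlight{pending: make(map[string]*call)}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = sf.Do("chain-1/42", func() error {
				time.Sleep(10 * time.Millisecond) // pretend to verify height 42
				fmt.Println("verifying height 42")
				return nil
			})
		}()
	}
	wg.Wait()
}
```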


@@ -13,19 +13,23 @@ import (
dbm "github.com/tendermint/tm-db"
)
var _ PersistentProvider = (*DBProvider)(nil)
// DBProvider stores commits and validator sets in a DB.
//
// The number of heights for which DBProvider stores commits and validator sets
// can be optionally limited by calling SetLimit with the desired limit.
type DBProvider struct {
label string
db dbm.DB
cdc *amino.Codec
limit int
logger log.Logger
label string
db dbm.DB
cdc *amino.Codec
limit int
}
func NewDBProvider(label string, db dbm.DB) *DBProvider {
var _ PersistentProvider = (*DBProvider)(nil)
// NewDBProvider returns a DBProvider.
func NewDBProvider(label string, db dbm.DB) *DBProvider {
// NOTE: when debugging, this type of construction might be useful.
//db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db)
@@ -40,19 +44,21 @@ func NewDBProvider(label string, db dbm.DB) *DBProvider {
return dbp
}
func (dbp *DBProvider) SetLogger(logger log.Logger) {
dbp.logger = logger.With("label", dbp.label)
}
// SetLimit limits the number of heights for which DBProvider stores commits
// and validator sets. E.g. 3 will result in storing only commits and validator
// sets for the 3 latest heights.
func (dbp *DBProvider) SetLimit(limit int) *DBProvider {
dbp.limit = limit
return dbp
}
// Implements PersistentProvider.
func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error {
func (dbp *DBProvider) SetLogger(logger log.Logger) {
dbp.logger = logger.With("label", dbp.label)
}
func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error {
dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc)
batch := dbp.db.NewBatch()
defer batch.Close()
@@ -94,17 +100,21 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error {
return nil
}
// Implements Provider.
func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (
FullCommit, error) {
dbp.logger.Info("DBProvider.LatestFullCommit()...",
"chainID", chainID, "minHeight", minHeight, "maxHeight", maxHeight)
if maxHeight != 0 && maxHeight < minHeight {
return FullCommit{}, fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v",
minHeight, maxHeight)
}
if minHeight <= 0 {
minHeight = 1
}
if maxHeight == 0 {
if maxHeight <= 0 {
maxHeight = 1<<63 - 1
}
@@ -131,16 +141,16 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int
} else {
lfc, err := dbp.fillFullCommit(sh)
if err == nil {
dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height())
dbp.logger.Info("DBProvider.LatestFullCommit() found latest", "height", lfc.Height())
return lfc, nil
} else {
dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc)
dbp.logger.Error(fmt.Sprintf("%+v", err))
dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc, "err", fmt.Sprintf("%+v", err))
return lfc, err
}
}
}
}
return FullCommit{}, lerr.ErrCommitNotFound()
}
@@ -168,19 +178,23 @@ func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *ty
}
func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) {
var chainID = sh.ChainID
var height = sh.Height
var valset, nextValset *types.ValidatorSet
var (
chainID = sh.ChainID
height = sh.Height
)
// Load the validator set.
valset, err := dbp.getValidatorSet(chainID, height)
if err != nil {
return FullCommit{}, err
}
// Load the next validator set.
nextValset, err = dbp.getValidatorSet(chainID, height+1)
nextValset, err := dbp.getValidatorSet(chainID, height+1)
if err != nil {
return FullCommit{}, err
}
// Return filled FullCommit.
return FullCommit{
SignedHeader: sh,
@@ -189,9 +203,16 @@ func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error)
}, nil
}
// deleteAfterN deletes all items except skipping first {after} items.
// example - deleteAfterN("test", 1):
// - signedHeader#188
// - signedHeader#187
// - validatorSet#187
// - signedHeader#186
// ==>
// - signedHeader#188
func (dbp *DBProvider) deleteAfterN(chainID string, after int) error {
dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after)
dbp.logger.Debug("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after)
itr := dbp.db.ReverseIterator(
signedHeaderKey(chainID, 1),
@@ -199,29 +220,30 @@ func (dbp *DBProvider) deleteAfterN(chainID string, after int) error {
)
defer itr.Close()
var lastHeight int64 = 1<<63 - 1
var numSeen = 0
var numDeleted = 0
var (
minHeight int64 = 1<<63 - 1
numSeen = 0
numDeleted = 0
)
for itr.Valid() {
key := itr.Key()
_, height, ok := parseChainKeyPrefix(key)
if !ok {
return fmt.Errorf("unexpected key %v", key)
} else {
if height < lastHeight {
lastHeight = height
numSeen += 1
}
if numSeen > after {
dbp.db.Delete(key)
numDeleted += 1
}
}
if height < minHeight {
minHeight = height
numSeen++
}
if numSeen > after {
dbp.db.Delete(key)
numDeleted++
}
itr.Next()
}
dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items", numDeleted))
dbp.logger.Debug(fmt.Sprintf("DBProvider.deleteAfterN() deleted %d items (seen %d)", numDeleted, numSeen))
return nil
}


@@ -1,5 +1,38 @@
/*
Package lite allows you to securely validate headers without a full node.
Package lite provides a light client implementation.
The concept of light clients was introduced in the Bitcoin white paper. It
describes a watcher of distributed consensus process that only validates the
consensus algorithm and not the state machine transactions within.
Tendermint light clients allow bandwidth & compute-constrained devices, such as
smartphones, low-power embedded chips, or other blockchains to efficiently
verify the consensus of a Tendermint blockchain. This forms the basis of safe
and efficient state synchronization for new network nodes and inter-blockchain
communication (where a light client of one Tendermint instance runs in another
chain's state machine).
In a network that is expected to reliably punish validators for misbehavior by
slashing bonded stake and where the validator set changes infrequently, clients
can take advantage of this assumption to safely synchronize a lite client
without downloading the intervening headers.
Light clients (and full nodes) operating in the Proof Of Stake context need a
trusted block height from a trusted source that is no older than 1 unbonding
window plus a configurable evidence submission synchrony bound. This is called
weak subjectivity.
Weak subjectivity is required in Proof of Stake blockchains because it is
costless for an attacker to buy up voting keys that are no longer bonded and
fork the network at some point in its prior history. See Vitalik's post at
[Proof of Stake: How I Learned to Love Weak
Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/).
NOTE: Tendermint provides a somewhat different (stronger) light client model
than Bitcoin under eclipse, since the eclipsing node(s) can only fool the light
client if they have two-thirds of the private keys from the last root-of-trust.
===
This library pulls together all the crypto and algorithms, so given a
relatively recent (< unbonding period) known validator set, one can get
@@ -42,13 +75,13 @@ ValidatorSets.
Verifier
A Verifier validates a new SignedHeader given the currently known state. There
are two different types of Verifiers provided.
Verifier validates a new SignedHeader given the currently known state. There
are two different types of Verifiers provided.
BaseVerifier - given a validator set and a height, this Verifier verifies
that > 2/3 of the voting power of the given validator set had signed the
SignedHeader, and that the SignedHeader was to be signed by the exact given
validator set, and that the height of the commit is at least height (or
Verifier - given a validator set and a height, this Verifier verifies
that > 2/3 of the voting power of the given validator set had signed the
SignedHeader, and that the SignedHeader was to be signed by the exact given
validator set, and that the height of the commit is at least height (or
greater).
DynamicVerifier - this Verifier implements an auto-update and persistence
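A sketch of the first kind of Verifier described above (not part of the diff; it uses NewVerifier, the constructor this changeset renames NewBaseVerifier to, and the chain ID is illustrative):

package main

import (
	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/types"
)

// verifyOnce checks that a SignedHeader at or above the given height
// carries > 2/3 of the voting power of the known validator set.
func verifyOnce(valset *types.ValidatorSet, sh types.SignedHeader) error {
	v := lite.NewVerifier("test-chain", sh.Height, valset)
	return v.Verify(sh)
}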

View File

@@ -1,275 +0,0 @@
package lite
import (
"bytes"
"fmt"
"sync"
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
)
const sizeOfPendingMap = 1024
var _ Verifier = (*DynamicVerifier)(nil)
// DynamicVerifier implements an auto-updating Verifier. It uses a
// "source" provider to obtain the needed FullCommits to securely sync with
// validator set changes. It stores properly validated data on the
// "trusted" local system.
// TODO: make this single threaded and create a new
// ConcurrentDynamicVerifier that wraps it with concurrency.
// see https://github.com/tendermint/tendermint/issues/3170
type DynamicVerifier struct {
chainID string
logger log.Logger
// Already validated, stored locally
trusted PersistentProvider
// New info, like a node rpc, or other import method.
source Provider
// pending map to synchronize concurrent verification requests
mtx sync.Mutex
pendingVerifications map[int64]chan struct{}
}
// NewDynamicVerifier returns a new DynamicVerifier. It uses the
// trusted provider to store validated data and the source provider to
// obtain missing data (e.g. FullCommits).
//
// The trusted provider should be a DBProvider.
// The source provider should be a client.HTTPProvider.
func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier {
return &DynamicVerifier{
logger: log.NewNopLogger(),
chainID: chainID,
trusted: trusted,
source: source,
pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap),
}
}
func (dv *DynamicVerifier) SetLogger(logger log.Logger) {
logger = logger.With("module", "lite")
dv.logger = logger
dv.trusted.SetLogger(logger)
dv.source.SetLogger(logger)
}
// Implements Verifier.
func (dv *DynamicVerifier) ChainID() string {
return dv.chainID
}
// Implements Verifier.
//
// If the validators have changed since the last known time, it looks to
// dv.trusted and dv.source to prove the new validators. On success, it will
// try to store the SignedHeader in dv.trusted if the next
// validator can be sourced.
func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error {
// Performs synchronization for multi-threads verification at the same height.
dv.mtx.Lock()
if pending := dv.pendingVerifications[shdr.Height]; pending != nil {
dv.mtx.Unlock()
<-pending // pending is chan struct{}
} else {
pending := make(chan struct{})
dv.pendingVerifications[shdr.Height] = pending
defer func() {
close(pending)
dv.mtx.Lock()
delete(dv.pendingVerifications, shdr.Height)
dv.mtx.Unlock()
}()
dv.mtx.Unlock()
}
//Get the exact trusted commit for h, and if it is
// equal to shdr, then it's already trusted, so
// just return nil.
trustedFCSameHeight, err := dv.trusted.LatestFullCommit(dv.chainID, shdr.Height, shdr.Height)
if err == nil {
// If loading trust commit successfully, and trust commit equal to shdr, then don't verify it,
// just return nil.
if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) {
dv.logger.Info(fmt.Sprintf("Load full commit at height %d from cache, there is not need to verify.", shdr.Height))
return nil
}
} else if !lerr.IsErrCommitNotFound(err) {
// Return error if it is not CommitNotFound error
dv.logger.Info(fmt.Sprintf("Encountered unknown error in loading full commit at height %d.", shdr.Height))
return err
}
// Get the latest known full commit <= h-1 from our trusted providers.
// The full commit at h-1 contains the valset to sign for h.
prevHeight := shdr.Height - 1
trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, prevHeight)
if err != nil {
return err
}
// sync up to the prevHeight and assert our latest NextValidatorSet
// is the ValidatorSet for the SignedHeader
if trustedFC.Height() == prevHeight {
// Return error if valset doesn't match.
if !bytes.Equal(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash) {
return lerr.ErrUnexpectedValidators(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash)
}
} else {
// If valset doesn't match, try to update
if !bytes.Equal(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash) {
// ... update.
trustedFC, err = dv.updateToHeight(prevHeight)
if err != nil {
return err
}
// Return error if valset _still_ doesn't match.
if !bytes.Equal(trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash) {
return lerr.ErrUnexpectedValidators(
trustedFC.NextValidators.Hash(),
shdr.Header.ValidatorsHash)
}
}
}
// Verify the signed header using the matching valset.
cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
err = cert.Verify(shdr)
if err != nil {
return err
}
// By now, the SignedHeader is fully validated and we're synced up to
// SignedHeader.Height - 1. To sync to SignedHeader.Height, we need
// the validator set at SignedHeader.Height + 1 so we can verify the
// SignedHeader.NextValidatorSet.
// TODO: is the ValidateFull below mostly redundant with the BaseVerifier.Verify above?
// See https://github.com/tendermint/tendermint/issues/3174.
// Get the next validator set.
nextValset, err := dv.source.ValidatorSet(dv.chainID, shdr.Height+1)
if lerr.IsErrUnknownValidators(err) {
// Ignore this error.
return nil
} else if err != nil {
return err
}
// Create filled FullCommit.
nfc := FullCommit{
SignedHeader: shdr,
Validators: trustedFC.NextValidators,
NextValidators: nextValset,
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := nfc.ValidateFull(dv.chainID); err != nil {
return err
}
// Trust it.
return dv.trusted.SaveFullCommit(nfc)
}
// verifyAndSave will verify if this is a valid source full commit given the
// best match trusted full commit, and if good, persist to dv.trusted.
// Returns ErrTooMuchChange when >2/3 of trustedFC did not sign sourceFC.
// Panics if trustedFC.Height() >= sourceFC.Height().
func (dv *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error {
if trustedFC.Height() >= sourceFC.Height() {
panic("should not happen")
}
err := trustedFC.NextValidators.VerifyFutureCommit(
sourceFC.Validators,
dv.chainID, sourceFC.SignedHeader.Commit.BlockID,
sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit,
)
if err != nil {
return err
}
return dv.trusted.SaveFullCommit(sourceFC)
}
// updateToHeight will use divide-and-conquer to find a path to h.
// Returns nil error iff we successfully verify and persist a full commit
// for height h, using repeated applications of bisection if necessary.
//
// Returns ErrCommitNotFound if source provider doesn't have the commit for h.
func (dv *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) {
// Fetch latest full commit from source.
sourceFC, err := dv.source.LatestFullCommit(dv.chainID, h, h)
if err != nil {
return FullCommit{}, err
}
// If sourceFC.Height() != h, we can't do it.
if sourceFC.Height() != h {
return FullCommit{}, lerr.ErrCommitNotFound()
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := sourceFC.ValidateFull(dv.chainID); err != nil {
return FullCommit{}, err
}
// Verify latest FullCommit against trusted FullCommits
FOR_LOOP:
for {
// Fetch latest full commit from trusted.
trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, h)
if err != nil {
return FullCommit{}, err
}
// We have nothing to do.
if trustedFC.Height() == h {
return trustedFC, nil
}
// Try to update to full commit with checks.
err = dv.verifyAndSave(trustedFC, sourceFC)
if err == nil {
// All good!
return sourceFC, nil
}
// Handle special case when err is ErrTooMuchChange.
if types.IsErrTooMuchChange(err) {
// Divide and conquer.
start, end := trustedFC.Height(), sourceFC.Height()
if !(start < end) {
panic("should not happen")
}
mid := (start + end) / 2
_, err = dv.updateToHeight(mid)
if err != nil {
return FullCommit{}, err
}
// If we made it to mid, we retry.
continue FOR_LOOP
}
return FullCommit{}, err
}
}
func (dv *DynamicVerifier) LastTrustedHeight() int64 {
fc, err := dv.trusted.LatestFullCommit(dv.chainID, 1, 1<<63-1)
if err != nil {
panic("should not happen")
}
return fc.Height()
}

View File

@@ -6,9 +6,6 @@ import (
"github.com/pkg/errors"
)
//----------------------------------------
// Error types
type errCommitNotFound struct{}
func (e errCommitNotFound) Error() string {
@@ -41,11 +38,11 @@ func (e errEmptyTree) Error() string {
return "Tree is empty"
}
//----------------------------------------
// Methods for above error types
type errCommitExpired struct{}
//-----------------
// ErrCommitNotFound
func (e errCommitExpired) Error() string {
return "commit is too old to be trusted"
}
// ErrCommitNotFound indicates that the requested commit was not found.
func ErrCommitNotFound() error {
@@ -57,9 +54,6 @@ func IsErrCommitNotFound(err error) bool {
return ok
}
//-----------------
// ErrUnexpectedValidators
// ErrUnexpectedValidators indicates a validator set mismatch.
func ErrUnexpectedValidators(got, want []byte) error {
return errors.Wrap(errUnexpectedValidators{
@@ -73,9 +67,6 @@ func IsErrUnexpectedValidators(err error) bool {
return ok
}
//-----------------
// ErrUnknownValidators
// ErrUnknownValidators indicates that some validator set was missing or unknown.
func ErrUnknownValidators(chainID string, height int64) error {
return errors.Wrap(errUnknownValidators{chainID, height}, "")
@@ -86,9 +77,6 @@ func IsErrUnknownValidators(err error) bool {
return ok
}
//-----------------
// ErrEmptyTree
func ErrEmptyTree() error {
return errors.Wrap(errEmptyTree{}, "")
}
@@ -97,3 +85,29 @@ func IsErrEmptyTree(err error) bool {
_, ok := errors.Cause(err).(errEmptyTree)
return ok
}
func ErrCommitExpired() error {
return errors.Wrap(errCommitExpired{}, "")
}
func IsErrCommitExpired(err error) bool {
_, ok := errors.Cause(err).(errCommitExpired)
return ok
}
type errValidatorChange struct {
change float64
}
func (e errValidatorChange) Error() string {
return fmt.Sprintf("%f is more than 1/3rd validator change", e.change)
}
func ErrValidatorChange(change float64) error {
return errors.Wrap(errValidatorChange{change: change}, "")
}
func IsErrValidatorChange(err error) bool {
_, ok := errors.Cause(err).(errValidatorChange)
return ok
}
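A sketch of how callers are expected to consume the constructor/predicate pairs above (not part of the diff; the threshold value is illustrative):

package main

import (
	"fmt"

	lerr "github.com/tendermint/tendermint/lite/errors"
)

// classify constructs an error with ErrXxx() and branches on it with
// IsErrXxx(), which unwraps via errors.Cause internally.
func classify(change float64) {
	err := lerr.ErrValidatorChange(change)
	if lerr.IsErrValidatorChange(err) {
		fmt.Println("validator set changed too much:", err)
	}
}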

View File

@@ -1,26 +1,25 @@
package lite
package privkeys
import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// privKeys is a helper type for testing.
// PrivKeys is a helper type for testing.
//
// It lets us simulate signing with many keys. The main use case is to create
// a set, and call GenSignedHeader to get properly signed header for testing.
//
// You can set different weights of validators each time you call ToValidators,
// and can optionally extend the validator set later with Extend.
type privKeys []crypto.PrivKey
type PrivKeys []crypto.PrivKey
// genPrivKeys produces an array of private keys to generate commits.
func genPrivKeys(n int) privKeys {
res := make(privKeys, n)
// GenPrivKeys produces an array of private keys to generate commits.
func GenPrivKeys(n int) PrivKeys {
res := make(PrivKeys, n)
for i := range res {
res[i] = ed25519.GenPrivKey()
}
@@ -28,22 +27,22 @@ func genPrivKeys(n int) privKeys {
}
// Change replaces the key at index i.
func (pkz privKeys) Change(i int) privKeys {
res := make(privKeys, len(pkz))
func (pkz PrivKeys) Change(i int) PrivKeys {
res := make(PrivKeys, len(pkz))
copy(res, pkz)
res[i] = ed25519.GenPrivKey()
return res
}
// Extend adds n more keys (to remove, just take a slice).
func (pkz privKeys) Extend(n int) privKeys {
extra := genPrivKeys(n)
func (pkz PrivKeys) Extend(n int) PrivKeys {
extra := GenPrivKeys(n)
return append(pkz, extra...)
}
// GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits.
func GenSecpPrivKeys(n int) privKeys {
res := make(privKeys, n)
func GenSecpPrivKeys(n int) PrivKeys {
res := make(PrivKeys, n)
for i := range res {
res[i] = secp256k1.GenPrivKey()
}
@@ -51,7 +50,7 @@ func GenSecpPrivKeys(n int) privKeys {
}
// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice).
func (pkz privKeys) ExtendSecp(n int) privKeys {
func (pkz PrivKeys) ExtendSecp(n int) PrivKeys {
extra := GenSecpPrivKeys(n)
return append(pkz, extra...)
}
@@ -60,7 +59,7 @@ func (pkz privKeys) ExtendSecp(n int) privKeys {
// The first key has weight `init` and it increases by `inc` every step
// so we can have all the same weight, or a simple linear distribution
// (should be enough for testing).
func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet {
func (pkz PrivKeys) ToValidators(init, inc int64) *types.ValidatorSet {
res := make([]*types.Validator, len(pkz))
for i, k := range pkz {
res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc)
@@ -69,7 +68,7 @@ func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet {
}
// signHeader properly signs the header with all keys from first to last exclusive.
func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit {
func (pkz PrivKeys) signHeader(header *types.Header, first, last int) *types.Commit {
commitSigs := make([]*types.CommitSig, len(pkz))
// We need this list to keep the ordering.
@@ -84,6 +83,18 @@ func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Com
return types.NewCommit(blockID, commitSigs)
}
// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader.
func (pkz PrivKeys) GenSignedHeader(chainID string, height int64, txs types.Txs,
valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader {
header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash)
check := types.SignedHeader{
Header: header,
Commit: pkz.signHeader(header, first, last),
}
return check
}
func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote {
addr := key.PubKey().Address()
idx, _ := valset.GetByAddress(addr)
@@ -127,27 +138,3 @@ func genHeader(chainID string, height int64, txs types.Txs,
LastResultsHash: resHash,
}
}
// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader.
func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs,
valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader {
header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash)
check := types.SignedHeader{
Header: header,
Commit: pkz.signHeader(header, first, last),
}
return check
}
// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit.
func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs,
valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit {
header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash)
commit := types.SignedHeader{
Header: header,
Commit: pkz.signHeader(header, first, last),
}
return NewFullCommit(commit, valset, nextValset)
}
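A sketch of the test-fixture flow these helpers enable (not part of the diff; because the package now lives under lite/internal/, such a test must sit inside the lite tree, and all values below are illustrative):

package lite_test

import (
	"testing"

	pks "github.com/tendermint/tendermint/lite/internal/privkeys"
)

func TestFixtureSketch(t *testing.T) {
	keys := pks.GenPrivKeys(4)       // four ed25519 keys
	vals := keys.ToValidators(10, 0) // equal weight of 10 each
	// Sign a header at height 5 with all four keys (indices [0, 4)).
	sh := keys.GenSignedHeader("test-chain", 5, nil, vals, vals,
		[]byte("app"), []byte("cons"), []byte("res"), 0, len(keys))
	if err := sh.ValidateBasic("test-chain"); err != nil {
		t.Fatal(err)
	}
}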

View File

@@ -6,17 +6,16 @@ import (
"github.com/tendermint/tendermint/types"
)
var _ PersistentProvider = (*multiProvider)(nil)
// multiProvider allows you to place one or more caches in front of a source
// Provider. It runs through them in order until a match is found.
// Provider. It runs through them in order until a match is found.
type multiProvider struct {
logger log.Logger
providers []PersistentProvider
logger log.Logger
}
// NewMultiProvider returns a new provider which wraps multiple other providers.
func NewMultiProvider(providers ...PersistentProvider) *multiProvider {
func NewMultiProvider(providers ...PersistentProvider) PersistentProvider {
return &multiProvider{
logger: log.NewNopLogger(),
providers: providers,
@@ -47,8 +46,8 @@ func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) {
// Returns the first error encountered.
func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) {
for _, p := range mc.providers {
var fc_ FullCommit
fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight)
var pfc FullCommit
pfc, err = p.LatestFullCommit(chainID, minHeight, maxHeight)
if lerr.IsErrCommitNotFound(err) {
err = nil
continue
@@ -56,18 +55,20 @@ func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight i
return
}
if fc == (FullCommit{}) {
fc = fc_
} else if fc_.Height() > fc.Height() {
fc = fc_
fc = pfc
} else if pfc.Height() > fc.Height() {
fc = pfc
}
if fc.Height() == maxHeight {
return
}
}
if fc == (FullCommit{}) {
err = lerr.ErrCommitNotFound()
return
}
return
}
@@ -76,10 +77,11 @@ func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight i
func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) {
for _, p := range mc.providers {
valset, err = p.ValidatorSet(chainID, height)
if err == nil {
// TODO Log unexpected types of errors.
return valset, nil
if lerr.IsErrUnknownValidators(err) {
err = nil
continue
}
return
}
return nil, lerr.ErrUnknownValidators(chainID, height)
}
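A sketch of how the layered caches described above are assembled (not part of the diff; it mirrors initProvider later in this changeset, and the names are illustrative):

package main

import (
	"github.com/tendermint/tendermint/lite"
	dbm "github.com/tendermint/tm-db"
)

// newTrusted places a bounded in-memory cache in front of a LevelDB-backed
// provider; lookups try the memory cache first and fall through in order.
func newTrusted(rootDir string, cacheSize int) lite.PersistentProvider {
	return lite.NewMultiProvider(
		lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize),
		lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir)),
	)
}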

View File

@@ -6,27 +6,40 @@ import (
)
// Provider provides information for the lite client to sync validators.
// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.
type Provider interface {
// LatestFullCommit returns the latest commit with minHeight <= height <=
// maxHeight.
// If maxHeight is zero, returns the latest where minHeight <= height.
// If maxHeight is greater than the latest height, the latest available commit should be returned.
LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error)
// Get the valset that corresponds to chainID and height and return.
// Height must be >= 1.
// ValidatorSet returns the valset that corresponds to chainID and height.
// height must be >= 1.
ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error)
// Set a logger.
// SetLogger sets a logger.
SetLogger(logger log.Logger)
}
// A provider that can also persist new information.
// Examples: MemProvider, files.Provider, CacheProvider.
// PersistentProvider is a provider that can also persist new information.
type PersistentProvider interface {
Provider
// SaveFullCommit saves a FullCommit (without verification).
SaveFullCommit(fc FullCommit) error
}
// UpdatingProvider is a provider that can update itself w/ more recent commit
// data.
type UpdatingProvider interface {
Provider
// Update internal information by fetching information somehow.
// UpdateToHeight will block until the request is complete, or returns an
// error if the request cannot complete. Generally, one must call
// UpdateToHeight(h) before LatestFullCommit(_,h,h) will return this height.
//
// NOTE: Behavior with concurrent requests is undefined. To make concurrent
// calls safe, look at the struct `ConcurrentUpdatingProvider`.
UpdateToHeight(chainID string, height int64) error
}
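A sketch of the calling pattern the UpdatingProvider comment describes (not part of the diff):

package main

import "github.com/tendermint/tendermint/lite"

// commitAt first updates to height h, since UpdateToHeight(h) must
// generally be called before LatestFullCommit(_, h, h) can return it.
func commitAt(p lite.UpdatingProvider, chainID string, h int64) (lite.FullCommit, error) {
	if err := p.UpdateToHeight(chainID, h); err != nil {
		return lite.FullCommit{}, err
	}
	return p.LatestFullCommit(chainID, h, h)
}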

View File

@@ -9,6 +9,7 @@ import (
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
pks "github.com/tendermint/tendermint/lite/internal/privkeys"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
@@ -48,7 +49,7 @@ func TestMultiProvider(t *testing.T) {
func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) {
assert, require := assert.New(t), require.New(t)
appHash := []byte(app)
keys := genPrivKeys(5)
keys := pks.GenPrivKeys(5)
count := 10
// Make a bunch of full commits.
@@ -56,7 +57,9 @@ func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) {
for i := 0; i < count; i++ {
vals := keys.ToValidators(10, int64(count/2))
h := int64(20 + 10*i)
fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5)
signedHeader := keys.GenSignedHeader(chainID, h, nil, vals, vals, appHash,
[]byte("params"), []byte("results"), 0, 5)
fcz[i] = NewFullCommit(signedHeader, vals, vals)
}
// Check that provider is initially empty.
@@ -113,14 +116,15 @@ func TestMultiLatestFullCommit(t *testing.T) {
chainID := "cache-best-height"
appHash := []byte("01234567")
keys := genPrivKeys(5)
keys := pks.GenPrivKeys(5)
count := 10
// Set a bunch of full commits.
for i := 0; i < count; i++ {
vals := keys.ToValidators(10, int64(count/2))
h := int64(10 * (i + 1))
fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5)
signedHeader := keys.GenSignedHeader(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5)
fc := NewFullCommit(signedHeader, vals, vals)
err := p2.SaveFullCommit(fc)
require.NoError(err)
}

View File

@@ -54,7 +54,7 @@ func _TestAppProofs(t *testing.T) {
source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, 1, 1)
require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
cert := lite.NewVerifier(chainID, seed.Height(), seed.Validators)
// Wait for tx confirmation.
done := make(chan int64)
@@ -139,7 +139,7 @@ func TestTxProofs(t *testing.T) {
source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, brh-2, brh-2)
require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
cert := lite.NewVerifier(chainID, seed.Height(), seed.Validators)
// First let's make sure a bogus transaction hash returns a valid non-existence proof.
key := types.Tx([]byte("bogus")).Hash()

View File

@@ -1,42 +0,0 @@
package proxy
import (
"github.com/pkg/errors"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite"
lclient "github.com/tendermint/tendermint/lite/client"
dbm "github.com/tendermint/tm-db"
)
func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) {
logger = logger.With("module", "lite/proxy")
logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client)
memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize)
lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir))
trust := lite.NewMultiProvider(
memProvider,
lvlProvider,
)
source := lclient.NewProvider(chainID, client)
cert := lite.NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(logger) // Sets logger recursively.
// TODO: Make this more secure, e.g. make it interactive in the console?
_, err := trust.LatestFullCommit(chainID, 1, 1<<63-1)
if err != nil {
logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source from height 1...")
fc, err := source.LatestFullCommit(chainID, 1, 1)
if err != nil {
return nil, errors.Wrap(err, "fetching source full commit @ height 1")
}
err = trust.SaveFullCommit(fc)
if err != nil {
return nil, errors.Wrap(err, "saving full commit to trusted")
}
}
return cert, nil
}

View File

@@ -7,7 +7,7 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/lite"
"github.com/tendermint/tendermint/lite/verifying"
rpcclient "github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
@@ -19,7 +19,7 @@ var _ rpcclient.Client = Wrapper{}
// provable before passing it along. Allows you to make any rpcclient fully secure.
type Wrapper struct {
rpcclient.Client
cert *lite.DynamicVerifier
cert *verifying.Provider
prt *merkle.ProofRuntime
}
@@ -27,7 +27,7 @@ type Wrapper struct {
// host and return a cryptographically secure rpc client.
//
// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface
func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper {
func SecureClient(c rpcclient.Client, cert *verifying.Provider) Wrapper {
prt := defaultProofRuntime()
wrap := Wrapper{c, cert, prt}
// TODO: no longer possible as no more such interface exposed....

View File

@@ -1,13 +0,0 @@
package lite
import (
"github.com/tendermint/tendermint/types"
)
// Verifier checks the votes to make sure the block really is signed properly.
// Verifier must know the current or recent set of validitors by some other
// means.
type Verifier interface {
Verify(sheader types.SignedHeader) error
ChainID() string
}

View File

@@ -10,55 +10,48 @@ import (
"github.com/tendermint/tendermint/types"
)
var _ Verifier = (*BaseVerifier)(nil)
// BaseVerifier lets us check the validity of SignedHeaders at height or
// later, requiring sufficient votes (> 2/3) from the given valset.
// To verify blocks produced by a blockchain with mutable validator sets,
// use the DynamicVerifier.
// TODO: Handle unbonding time.
type BaseVerifier struct {
// verifier lets us check the validity of SignedHeaders at the given height or later,
// requiring sufficient votes (> 2/3) from the given valset. To verify blocks
// produced by a blockchain with mutable validator sets, use the
// DynamicVerifier.
type verifier struct {
chainID string
height int64
valset *types.ValidatorSet
}
// NewBaseVerifier returns a new Verifier initialized with a validator set at
// NewVerifier returns a new Verifier initialized with a validator set at
// some height.
func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier {
func NewVerifier(chainID string, height int64, valset *types.ValidatorSet) *verifier {
if valset.IsNilOrEmpty() {
panic("NewBaseVerifier requires a valid valset")
panic("NewVerifier requires a valid valset")
}
return &BaseVerifier{
return &verifier{
chainID: chainID,
height: height,
valset: valset,
}
}
// Implements Verifier.
func (bv *BaseVerifier) ChainID() string {
func (bv *verifier) ChainID() string {
return bv.chainID
}
// Implements Verifier.
func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error {
func (bv *verifier) Verify(signedHeader types.SignedHeader) error {
// We can't verify commits for a different chain.
if signedHeader.ChainID != bv.chainID {
return cmn.NewError("BaseVerifier chainID is %v, cannot verify chainID %v",
return cmn.NewError("verifier chainID is %v, cannot verify chainID %v",
bv.chainID, signedHeader.ChainID)
}
// We can't verify commits older than bv.height.
if signedHeader.Height < bv.height {
return cmn.NewError("BaseVerifier height is %v, cannot verify height %v",
return cmn.NewError("verifier height is %v, cannot verify height %v",
bv.height, signedHeader.Height)
}
// We can't verify with the wrong validator set.
if !bytes.Equal(signedHeader.ValidatorsHash,
bv.valset.Hash()) {
if !bytes.Equal(signedHeader.ValidatorsHash, bv.valset.Hash()) {
return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bv.valset.Hash())
}

View File

@@ -6,21 +6,22 @@ import (
"github.com/stretchr/testify/assert"
lerr "github.com/tendermint/tendermint/lite/errors"
pks "github.com/tendermint/tendermint/lite/internal/privkeys"
"github.com/tendermint/tendermint/types"
)
func TestBaseCert(t *testing.T) {
func TestVerifier(t *testing.T) {
assert := assert.New(t)
keys := genPrivKeys(4)
keys := pks.GenPrivKeys(4)
// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
vals := keys.ToValidators(20, 10)
// and a Verifier based on our known set
chainID := "test-static"
cert := NewBaseVerifier(chainID, 2, vals)
cert := NewVerifier(chainID, 2, vals)
cases := []struct {
keys privKeys
keys pks.PrivKeys
vals *types.ValidatorSet
height int64
first, last int // who actually signs

lite/verifying/provider.go (new file, 533 lines)
View File

@@ -0,0 +1,533 @@
package verifying
import (
"bytes"
"fmt"
"math"
"sync"
"time"
"github.com/pkg/errors"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite"
lclient "github.com/tendermint/tendermint/lite/client"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
const (
loggerPath = "lite"
memDBFile = "trusted.mem"
lvlDBFile = "trusted.lvl"
dbName = "trust-base"
sizeOfPendingMap = 1024
)
// TrustOptions are the trust parameters needed for when a new light client
// connects to the network or when a light client that has been offline for
// longer than the unbonding period connects to the network.
//
// The expectation is the user will get this information from a trusted source
// like a validator, a friend, or a secure website. A more user-friendly
// solution, with trust tradeoffs, is to establish an HTTPS-based protocol
// with a default endpoint that populates this information. Also an on-chain
// registry of roots-of-trust (e.g. on the Cosmos Hub) seems likely in the
// future.
type TrustOptions struct {
// Required: only trust commits up to this old.
// Should be equal to the unbonding period minus a configurable evidence
// submission synchrony bound.
TrustPeriod time.Duration `json:"trust-period"`
// Option 1: TrustHeight and TrustHash can both be provided
// to force the trusting of a particular height and hash.
// If the latest trusted height/hash is more recent, then this option is
// ignored.
TrustHeight int64 `json:"trust-height"`
TrustHash []byte `json:"trust-hash"`
// Option 2: Callback can be set to implement a confirmation
// step if the trust store is uninitialized, or expired.
Callback func(height int64, hash []byte) error
}
// HeightAndHashPresent returns true if TrustHeight and TrustHash are present.
func (opts TrustOptions) HeightAndHashPresent() bool {
return opts.TrustHeight > 0 && len(opts.TrustHash) > 0
}
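A sketch of initializing the verifying Provider with "Option 1" above, an explicit trusted height and hash obtained out of band (not part of the diff; the RPC endpoint, trust period, and cache size are all illustrative):

package main

import (
	"time"

	log "github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/lite/verifying"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// newLightClient trusts (trustHeight, trustHash) obtained from an
// out-of-band source; TrustPeriod should be a bit below the chain's
// unbonding period.
func newLightClient(rootDir string, trustHeight int64, trustHash []byte) (*verifying.Provider, error) {
	node := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")
	opts := verifying.TrustOptions{
		TrustPeriod: 21 * 24 * time.Hour,
		TrustHeight: trustHeight,
		TrustHash:   trustHash,
	}
	return verifying.NewProvider("test-chain", rootDir, node, log.NewNopLogger(), 10, opts)
}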
// Provider implements a persistent caching Provider that auto-validates. It
// uses a "source" Provider to obtain the needed FullCommits to securely sync
// with validator set changes. It stores properly validated data on the
// "trusted" local system using a "trusted" Provider.
//
// NOTE:
// - This Provider can only work with one chainID, provided upon
// instantiation;
// - For concurrent usage, use ConcurrentProvider.
type Provider struct {
chainID string
trustPeriod time.Duration // see TrustOptions above
now nowFn
height int64
logger log.Logger
// Already validated, stored locally
trusted lite.PersistentProvider
// New info, like a node rpc, or other import method.
source lite.Provider
// pending map to synchronize concurrent verification requests
mtx sync.Mutex
pendingVerifications map[int64]chan struct{}
}
var _ lite.UpdatingProvider = (*Provider)(nil)
type nowFn func() time.Time
// NewProvider creates a Provider.
//
// NOTE: If you retain the resulting struct in memory for a long time, usage of
// it may eventually error, but immediate usage should not error like that, so
// that e.g. cli usage never errors unexpectedly.
func NewProvider(chainID, rootDir string, client lclient.SignStatusClient,
logger log.Logger, cacheSize int, options TrustOptions) (*Provider, error) {
vp := initProvider(chainID, rootDir, client, logger, cacheSize, options)
// Get the latest source commit, or the one provided in options.
trustCommit, err := getTrustedCommit(vp.logger, client, options)
if err != nil {
return nil, err
}
err = vp.fillValsetAndSaveFC(trustCommit, nil, nil)
if err != nil {
return nil, err
}
// sanity check
// FIXME: Can't it happen that the local clock is a bit off and the
// trustCommit.Time is a few seconds in the future?
now := time.Now()
if now.Sub(trustCommit.Time) <= 0 {
panic(fmt.Sprintf("impossible time %v vs %v", now, trustCommit.Time))
}
// Otherwise we're syncing within the unbonding period.
// NOTE: There is a duplication of fetching this latest commit (since
// UpdateToHeight() will fetch it again, and latestCommit isn't used), but
// it's only once upon initialization so it's not a big deal.
if options.HeightAndHashPresent() {
// Fetch latest commit (nil means latest height).
latestCommit, err := client.Commit(nil)
if err != nil {
return nil, err
}
err = vp.UpdateToHeight(chainID, latestCommit.SignedHeader.Height)
if err != nil {
return nil, err
}
}
return vp, nil
}
func initProvider(chainID, rootDir string, client lclient.SignStatusClient,
logger log.Logger, cacheSize int, options TrustOptions) *Provider {
// Validate TrustOptions.
if options.TrustPeriod == 0 {
panic("Provider must have non-zero trust period")
}
// Init logger.
logger = logger.With("module", loggerPath)
logger.Info("lite/verifying/NewProvider", "chainID", chainID, "rootDir", rootDir, "client", client)
// The trusted Provider should be a DBProvider.
trusted := lite.NewMultiProvider(
lite.NewDBProvider(memDBFile, dbm.NewMemDB()).SetLimit(cacheSize),
lite.NewDBProvider(lvlDBFile, dbm.NewDB(dbName, dbm.GoLevelDBBackend, rootDir)),
)
trusted.SetLogger(logger)
// The source Provider should be a client.HTTPProvider.
source := lclient.NewProvider(chainID, client)
source.SetLogger(logger)
return &Provider{
chainID: chainID,
trustPeriod: options.TrustPeriod,
trusted: trusted,
source: source,
logger: logger,
pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap),
}
}
// getTrustedCommit returns a commit trusted with weak subjectivity. It either:
// 1. Fetches a commit at height provided in options and ensures the specified
// commit is within the trust period of latest block
// 2. Trusts the remote node and gets the latest commit
// 3. Returns an error if the height provided in trust option is too old to
// sync to latest.
func getTrustedCommit(logger log.Logger, client lclient.SignStatusClient, options TrustOptions) (types.SignedHeader, error) {
// Get the latest commit always.
latestCommit, err := client.Commit(nil)
if err != nil {
return types.SignedHeader{}, err
}
// If the user has set a root of trust, confirm it then update to newest.
if options.HeightAndHashPresent() {
trustCommit, err := client.Commit(&options.TrustHeight)
if err != nil {
return types.SignedHeader{}, err
}
if latestCommit.Time.Sub(trustCommit.Time) > options.TrustPeriod {
return types.SignedHeader{},
errors.New("your trusted block height is older than the trust period from latest block")
}
signedHeader := trustCommit.SignedHeader
if !bytes.Equal(signedHeader.Hash(), options.TrustHash) {
return types.SignedHeader{},
fmt.Errorf("WARNING: expected hash %X, but got %X", options.TrustHash, signedHeader.Hash())
}
return signedHeader, nil
}
signedHeader := latestCommit.SignedHeader
// NOTE: This should really belong in the callback.
// WARN THE USER IN ALL CAPS THAT THE LITE CLIENT IS NEW, AND THAT WE WILL
// SYNC TO AND VERIFY LATEST COMMIT.
logger.Info("WARNING: trusting source at height %d and hash %X...\n", signedHeader.Height, signedHeader.Hash())
if options.Callback != nil {
err := options.Callback(signedHeader.Height, signedHeader.Hash())
if err != nil {
return types.SignedHeader{}, err
}
}
return signedHeader, nil
}
func (vp *Provider) Verify(signedHeader types.SignedHeader) error {
if signedHeader.ChainID != vp.chainID {
return fmt.Errorf("expected chainID %s, got %s", vp.chainID, signedHeader.ChainID)
}
valSet, err := vp.ValidatorSet(signedHeader.ChainID, signedHeader.Height)
if err != nil {
return err
}
if signedHeader.Height < vp.height {
return fmt.Errorf("expected height %d, got %d", vp.height, signedHeader.Height)
}
if !bytes.Equal(signedHeader.ValidatorsHash, valSet.Hash()) {
return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, valSet.Hash())
}
err = signedHeader.ValidateBasic(vp.chainID)
if err != nil {
return err
}
// Check commit signatures.
err = valSet.VerifyCommit(vp.chainID, signedHeader.Commit.BlockID, signedHeader.Height, signedHeader.Commit)
if err != nil {
return err
}
return nil
}
func (vp *Provider) SetLogger(logger log.Logger) {
vp.logger = logger
vp.trusted.SetLogger(logger)
vp.source.SetLogger(logger)
}
func (vp *Provider) ChainID() string { return vp.chainID }
// UpdateToHeight ... stores the full commit (SignedHeader + Validators) in
// vp.trusted.
func (vp *Provider) UpdateToHeight(chainID string, height int64) error {
_, err := vp.trusted.LatestFullCommit(vp.chainID, height, height)
// If we already have the commit, just return nil.
if err == nil {
return nil
} else if !lerr.IsErrCommitNotFound(err) {
// Return error if it is not CommitNotFound error.
vp.logger.Error("Encountered unknown error while loading full commit", "height", height, "err", err)
return err
}
// Fetch trusted FC at exactly height, while updating trust when possible.
_, err = vp.fetchAndVerifyToHeightBisecting(height)
if err != nil {
return err
}
vp.height = height
// Good!
return nil
}
// If valset or nextValset are nil, fetches them.
// Then validates full commit, then saves it.
func (vp *Provider) fillValsetAndSaveFC(signedHeader types.SignedHeader,
valset, nextValset *types.ValidatorSet) (err error) {
// If there is no valset passed, fetch it
if valset == nil {
valset, err = vp.source.ValidatorSet(vp.chainID, signedHeader.Height)
if err != nil {
return errors.Wrap(err, "fetching the valset")
}
}
// If there is no nextvalset passed, fetch it
if nextValset == nil {
// TODO: Don't loop forever, just do it 10 times
for {
// fetch block at signedHeader.Height+1
nextValset, err = vp.source.ValidatorSet(vp.chainID, signedHeader.Height+1)
if lerr.IsErrUnknownValidators(err) {
// try again until we get it.
vp.logger.Debug("fetching valset for height %d...\n", signedHeader.Height+1)
continue
} else if err != nil {
return errors.Wrap(err, "fetching the next valset")
} else if nextValset != nil {
break
}
}
}
// Create filled FullCommit.
fc := lite.FullCommit{
SignedHeader: signedHeader,
Validators: valset,
NextValidators: nextValset,
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := fc.ValidateFull(vp.chainID); err != nil {
return errors.Wrap(err, "verifying validators from source")
}
// Trust it.
err = vp.trusted.SaveFullCommit(fc)
if err != nil {
return errors.Wrap(err, "saving full commit")
}
return nil
}
// verifyAndSave will verify if this is a valid source full commit given the
// best match trusted full commit, and persist to vp.trusted.
//
// Returns ErrTooMuchChange when >2/3 of trustedFC did not sign newFC.
// Returns ErrCommitExpired when trustedFC is too old.
// Panics if trustedFC.Height() >= newFC.Height().
func (vp *Provider) verifyAndSave(trustedFC, newFC lite.FullCommit) error {
// Shouldn't have trusted commits before the new commit height.
if trustedFC.Height() >= newFC.Height() {
panic("should not happen")
}
// Check that the latest commit isn't beyond the vp.trustPeriod.
if vp.now().Sub(trustedFC.SignedHeader.Time) > vp.trustPeriod {
return lerr.ErrCommitExpired()
}
// Validate the new commit in terms of validator set of last trusted commit.
if err := trustedFC.NextValidators.VerifyCommit(vp.chainID, newFC.SignedHeader.Commit.BlockID, newFC.SignedHeader.Height, newFC.SignedHeader.Commit); err != nil {
return err
}
// Locally validate the full commit before we can trust it.
if newFC.Height() >= trustedFC.Height()+1 {
err := newFC.ValidateFull(vp.chainID)
if err != nil {
return err
}
}
change := compareVotingPowers(trustedFC, newFC)
if change > 1.0/3.0 {
return lerr.ErrValidatorChange(change)
}
return vp.trusted.SaveFullCommit(newFC)
}
func compareVotingPowers(trustedFC, newFC lite.FullCommit) float64 {
var diffAccumulator float64
for _, val := range newFC.Validators.Validators {
newPowerRatio := float64(val.VotingPower) / float64(newFC.Validators.TotalVotingPower())
_, tval := trustedFC.NextValidators.GetByAddress(val.Address)
oldPowerRatio := float64(tval.VotingPower) / float64(trustedFC.NextValidators.TotalVotingPower())
diffAccumulator += math.Abs(newPowerRatio - oldPowerRatio)
}
return diffAccumulator
}
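As a worked sketch with made-up powers: if the trusted NextValidators are {A: 50, B: 50} and the new Validators are {A: 75, B: 25} (both totalling 100), the accumulator is |75/100 - 50/100| + |25/100 - 50/100| = 0.25 + 0.25 = 0.5; that exceeds 1/3, so verifyAndSave above would return ErrValidatorChange(0.5). Note the lookup is by address in the trusted NextValidators, so this sketch assumes every new validator was already present there.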
func (vp *Provider) fetchAndVerifyToHeightLinear(h int64) (lite.FullCommit, error) {
// Fetch latest full commit from source.
sourceFC, err := vp.source.LatestFullCommit(vp.chainID, h, h)
if err != nil {
return lite.FullCommit{}, err
}
// If sourceFC.Height() != h, we can't do it.
if sourceFC.Height() != h {
return lite.FullCommit{}, lerr.ErrCommitNotFound()
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := sourceFC.ValidateFull(vp.chainID); err != nil {
return lite.FullCommit{}, err
}
if h == sourceFC.Height()+1 {
trustedFC, err := vp.trusted.LatestFullCommit(vp.chainID, 1, h)
if err != nil {
return lite.FullCommit{}, err
}
err = vp.verifyAndSave(trustedFC, sourceFC)
if err != nil {
return lite.FullCommit{}, err
}
return sourceFC, nil
}
// Verify latest FullCommit against trusted FullCommits
// Use a loop rather than recursion to avoid stack overflows.
for {
// Fetch latest full commit from trusted.
trustedFC, err := vp.trusted.LatestFullCommit(vp.chainID, 1, h)
if err != nil {
return lite.FullCommit{}, err
}
// We have nothing to do.
if trustedFC.Height() == h {
return trustedFC, nil
}
sourceFC, err = vp.source.LatestFullCommit(vp.chainID, trustedFC.Height()+1, trustedFC.Height()+1)
if err != nil {
return lite.FullCommit{}, err
}
err = vp.verifyAndSave(trustedFC, sourceFC)
if err != nil {
return lite.FullCommit{}, err
}
}
}
// fetchAndVerifyToHeightBisecting will use divide-and-conquer to find a path to h.
// Returns nil error iff we successfully verify for height h, using repeated
// applications of bisection if necessary.
// Along the way, if a recent trust is used to verify a more recent header, the
// more recent header becomes trusted.
//
// Returns ErrCommitNotFound if source Provider doesn't have the commit for h.
func (vp *Provider) fetchAndVerifyToHeightBisecting(h int64) (lite.FullCommit, error) {
// Fetch latest full commit from source.
sourceFC, err := vp.source.LatestFullCommit(vp.chainID, h, h)
if err != nil {
return lite.FullCommit{}, err
}
// If sourceFC.Height() != h, we can't do it.
if sourceFC.Height() != h {
return lite.FullCommit{}, lerr.ErrCommitNotFound()
}
// Validate the full commit. This checks the cryptographic
// signatures of Commit against Validators.
if err := sourceFC.ValidateFull(vp.chainID); err != nil {
return lite.FullCommit{}, err
}
// Verify latest FullCommit against trusted FullCommits
// Use a loop rather than recursion to avoid stack overflows.
for {
// Fetch latest full commit from trusted.
trustedFC, err := vp.trusted.LatestFullCommit(vp.chainID, 1, h)
if err != nil {
return lite.FullCommit{}, err
}
// We have nothing to do.
if trustedFC.Height() == h {
return trustedFC, nil
}
// Update to full commit with checks.
err = vp.verifyAndSave(trustedFC, sourceFC)
// Handle special case when err is ErrTooMuchChange.
if types.IsErrTooMuchChange(err) {
// Divide and conquer.
start, end := trustedFC.Height(), sourceFC.Height()
if !(start < end) {
panic("should not happen")
}
mid := (start + end) / 2
// Recursive call back into fetchAndVerifyToHeightBisecting. Once an inner
// call succeeds, the outer calls will succeed.
_, err = vp.fetchAndVerifyToHeightBisecting(mid)
if err != nil {
return lite.FullCommit{}, err
}
// If we made it to mid, we retry.
continue
} else if err != nil {
return lite.FullCommit{}, err
}
// All good!
return sourceFC, nil
}
}
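As a concrete (made-up) trace of the bisection above: with trust at height 1 and a target h = 100 where the validator set drifted too much in between, verifyAndSave(trusted@1, source@100) fails with ErrTooMuchChange, so the provider recurses to mid = (1+100)/2 = 50; if 1 -> 50 also changes too much it recurses again to 25, and so on until some shorter hop verifies. Each success stores a newer trusted full commit, and the outer loop then retries the original target against that closer trust.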
func (vp *Provider) LastTrustedHeight() int64 {
fc, err := vp.trusted.LatestFullCommit(vp.chainID, 1, 1<<63-1)
if err != nil {
panic("should not happen")
}
return fc.Height()
}
func (vp *Provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (lite.FullCommit, error) {
return vp.trusted.LatestFullCommit(chainID, minHeight, maxHeight)
}
func (vp *Provider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) {
// XXX try to sync?
return vp.trusted.ValidatorSet(chainID, height)
}

View File

@@ -1,4 +1,4 @@
package lite
package verifying
import (
"fmt"
@@ -9,18 +9,20 @@ import (
"github.com/stretchr/testify/require"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite"
pks "github.com/tendermint/tendermint/lite/internal/privkeys"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
func TestInquirerValidPath(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := NewDBProvider("trust", dbm.NewMemDB())
source := NewDBProvider("source", dbm.NewMemDB())
func TestProviderValidPath(t *testing.T) {
require := require.New(t)
trust := lite.NewDBProvider("trust", dbm.NewMemDB())
source := lite.NewDBProvider("source", dbm.NewMemDB())
// Set up the validators to generate test blocks.
var vote int64 = 10
keys := genPrivKeys(5)
keys := pks.GenPrivKeys(5)
nkeys := keys.Extend(1)
// Construct a bunch of commits, each with one more height than the last.
@@ -28,16 +30,17 @@ func TestInquirerValidPath(t *testing.T) {
consHash := []byte("params")
resHash := []byte("results")
count := 50
fcz := make([]FullCommit, count)
fcz := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(vote, 0)
nextVals := nkeys.ToValidators(vote, 0)
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
fcz[i] = keys.GenFullCommit(
signedHeader := keys.GenSignedHeader(
chainID, h, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
fcz[i] = lite.NewFullCommit(signedHeader, vals, nextVals)
// Extend the keys by 1 each time.
keys = nkeys
nkeys = nkeys.Extend(1)
@@ -45,49 +48,55 @@ func TestInquirerValidPath(t *testing.T) {
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(err)
cert := NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(log.TestingLogger())
require.NoError(err)
vp, _ := NewProvider(chainID, trust, source)
vp.SetLogger(log.TestingLogger())
// This should fail validation:
sh := fcz[count-1].SignedHeader
err = cert.Verify(sh)
require.NotNil(err)
// The latest commit is the first one.
fc, err := vp.LatestFullCommit(chainID, 0, fcz[count-1].SignedHeader.Height)
require.NoError(err)
require.NoError(fc.ValidateFull(chainID))
require.Equal(fcz[0].SignedHeader, fc.SignedHeader)
// Adding a few commits in the middle should be insufficient.
// The latest commit is still the first one.
for i := 10; i < 13; i++ {
err := source.SaveFullCommit(fcz[i])
require.Nil(err)
require.NoError(err)
}
err = cert.Verify(sh)
assert.NotNil(err)
fc, err = vp.LatestFullCommit(chainID, 0, fcz[count-1].SignedHeader.Height)
require.NoError(err)
require.NoError(fc.ValidateFull(chainID))
require.Equal(fcz[0].SignedHeader, fc.SignedHeader)
// With more info, we succeed.
for i := 0; i < count; i++ {
err := source.SaveFullCommit(fcz[i])
require.Nil(err)
require.NoError(err)
}
err = cert.Verify(sh)
assert.Nil(err, "%+v", err)
fc, err = vp.LatestFullCommit(chainID, 0, fcz[count-1].SignedHeader.Height)
require.NoError(err)
require.NoError(fc.ValidateFull(chainID))
require.Equal(fcz[count-1].SignedHeader, fc.SignedHeader)
}
func TestDynamicVerify(t *testing.T) {
trust := NewDBProvider("trust", dbm.NewMemDB())
source := NewDBProvider("source", dbm.NewMemDB())
func TestProviderDynamicVerification(t *testing.T) {
trust := lite.NewDBProvider("trust", dbm.NewMemDB())
source := lite.NewDBProvider("source", dbm.NewMemDB())
// 10 commits with one valset, 1 to change,
// 10 commits with the next one
n1, n2 := 10, 10
nCommits := n1 + n2 + 1
maxHeight := int64(nCommits)
fcz := make([]FullCommit, nCommits)
fcz := make([]lite.FullCommit, nCommits)
// gen the 2 val sets
chainID := "dynamic-verifier"
power := int64(10)
keys1 := genPrivKeys(5)
keys1 := pks.GenPrivKeys(5)
vals1 := keys1.ToValidators(power, 0)
keys2 := genPrivKeys(5)
keys2 := pks.GenPrivKeys(5)
vals2 := keys2.ToValidators(power, 0)
// make some commits with the first
@@ -110,56 +119,55 @@ func TestDynamicVerify(t *testing.T) {
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(t, err)
ver := NewDynamicVerifier(chainID, trust, source)
ver.SetLogger(log.TestingLogger())
require.NoError(t, err)
vp, _ := NewProvider(chainID, trust, source)
vp.SetLogger(log.TestingLogger())
// fetch the latest from the source
latestFC, err := source.LatestFullCommit(chainID, 1, maxHeight)
require.NoError(t, err)
// try to update to the latest
err = ver.Verify(latestFC.SignedHeader)
require.NoError(t, err)
require.NoError(latestFC.ValidateFull(chainID))
require.Equal(fcz[nCommits-1].SignedHeader, latestFC.SignedHeader)
}
func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit {
height += 1
func makeFullCommit(height int64, keys pks.PrivKeys, vals, nextVals *types.ValidatorSet, chainID string) lite.FullCommit {
height++
consHash := []byte("special-params")
appHash := []byte(fmt.Sprintf("h=%d", height))
resHash := []byte(fmt.Sprintf("res=%d", height))
return keys.GenFullCommit(
signedHeader := keys.GenSignedHeader(
chainID, height, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
return lite.NewFullCommit(signedHeader, vals, nextVals)
}
func TestInquirerVerifyHistorical(t *testing.T) {
func TestVerifingProviderHistorical(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := NewDBProvider("trust", dbm.NewMemDB())
source := NewDBProvider("source", dbm.NewMemDB())
trust := lite.NewDBProvider("trust", dbm.NewMemDB())
source := lite.NewDBProvider("source", dbm.NewMemDB())
// Set up the validators to generate test blocks.
var vote int64 = 10
keys := genPrivKeys(5)
keys := pks.GenPrivKeys(5)
nkeys := keys.Extend(1)
// Construct a bunch of commits, each with one more height than the last.
chainID := "inquiry-test"
count := 10
consHash := []byte("special-params")
fcz := make([]FullCommit, count)
fcz := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(vote, 0)
nextVals := nkeys.ToValidators(vote, 0)
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
resHash := []byte(fmt.Sprintf("res=%d", h))
fcz[i] = keys.GenFullCommit(
signedHeader := keys.GenSignedHeader(
chainID, h, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
fcz[i] = lite.NewFullCommit(signedHeader, vals, nextVals)
// Extend the keys by 1 each time.
keys = nkeys
nkeys = nkeys.Extend(1)
@@ -167,9 +175,9 @@ func TestInquirerVerifyHistorical(t *testing.T) {
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(err)
cert := NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(log.TestingLogger())
require.NoError(err)
vp, _ := NewProvider(chainID, trust, source)
vp.SetLogger(log.TestingLogger())
// Store a few full commits as trust.
for _, i := range []int{2, 5} {
@@ -177,72 +185,71 @@ func TestInquirerVerifyHistorical(t *testing.T) {
}
// See if we can jump forward using trusted full commits.
// Source doesn't have fcz[9] so cert.LastTrustedHeight won't change.
// Source doesn't have fcz[9] so vp.LastTrustedHeight won't change.
err = source.SaveFullCommit(fcz[7])
require.Nil(err, "%+v", err)
sh := fcz[8].SignedHeader
err = cert.Verify(sh)
require.Nil(err, "%+v", err)
assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
require.NoError(err, "%+v", err)
assert.Equal(fcz[7].Height(), vp.LastTrustedHeight())
fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
require.NotNil(err, "%+v", err)
assert.Equal(fc_, (FullCommit{}))
require.Error(err, "%+v", err)
assert.Equal((lite.FullCommit{}), fc_)
// With fcz[9] Verify will update last trusted height.
err = source.SaveFullCommit(fcz[9])
require.Nil(err, "%+v", err)
sh = fcz[8].SignedHeader
err = cert.Verify(sh)
require.Nil(err, "%+v", err)
assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
require.NoError(err, "%+v", err)
assert.Equal(fcz[8].Height(), vp.LastTrustedHeight())
fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
require.Nil(err, "%+v", err)
assert.Equal(fc_.Height(), fcz[8].Height())
require.NoError(err, "%+v", err)
assert.Equal(fcz[8].Height(), fc_.Height())
// Add access to all full commits via untrusted source.
for i := 0; i < count; i++ {
err := source.SaveFullCommit(fcz[i])
require.Nil(err)
require.NoError(err)
}
// Try to check an unknown seed in the past.
sh = fcz[3].SignedHeader
err = cert.Verify(sh)
require.Nil(err, "%+v", err)
assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
// Try to fetch an unknown commit from the past.
fc_, err = trust.LatestFullCommit(chainID, fcz[2].Height(), fcz[3].Height())
require.NoError(err, "%+v", err)
assert.Equal(fcz[2].Height(), fc_.Height())
assert.Equal(fcz[8].Height(), vp.LastTrustedHeight())
// TODO This should work for as long as the trust period hasn't passed for
// fcz[2]. Write a test that tries to retroactively fetch fcz[3] from
// source. Initially it should fail since source doesn't have it, but it
// should succeed once source is provided it.
// Jump all the way forward again.
sh = fcz[count-1].SignedHeader
err = cert.Verify(sh)
require.Nil(err, "%+v", err)
assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
// Try to fetch the latest known commit.
fc_, err = trust.LatestFullCommit(chainID, 0, fcz[9].Height())
require.NoError(err, "%+v", err)
assert.Equal(fcz[9].Height(), fc_.Height())
assert.Equal(fcz[9].Height(), vp.LastTrustedHeight())
}
func TestConcurrencyInquirerVerify(t *testing.T) {
func TestConcurrentProvider(t *testing.T) {
_, require := assert.New(t), require.New(t)
trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10)
source := NewDBProvider("source", dbm.NewMemDB())
trust := lite.NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10)
source := lite.NewDBProvider("source", dbm.NewMemDB())
// Set up the validators to generate test blocks.
var vote int64 = 10
keys := genPrivKeys(5)
keys := pks.GenPrivKeys(5)
nkeys := keys.Extend(1)
// Construct a bunch of commits, each with one more height than the last.
chainID := "inquiry-test"
count := 10
consHash := []byte("special-params")
fcz := make([]FullCommit, count)
fcz := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
vals := keys.ToValidators(vote, 0)
nextVals := nkeys.ToValidators(vote, 0)
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
resHash := []byte(fmt.Sprintf("res=%d", h))
fcz[i] = keys.GenFullCommit(
signedHeader := keys.GenSignedHeader(
chainID, h, nil,
vals, nextVals,
appHash, consHash, resHash, 0, len(keys))
fcz[i] = lite.NewFullCommit(signedHeader, vals, nextVals)
// Extend the keys by 1 each time.
keys = nkeys
nkeys = nkeys.Extend(1)
@@ -250,15 +257,16 @@ func TestConcurrencyInquirerVerify(t *testing.T) {
// Initialize a Verifier with the initial state.
err := trust.SaveFullCommit(fcz[0])
require.Nil(err)
cert := NewDynamicVerifier(chainID, trust, source)
cert.SetLogger(log.TestingLogger())
require.NoError(err)
vp, _ := NewProvider(chainID, trust, source)
vp.SetLogger(log.TestingLogger())
cp := lite.NewConcurrentProvider(vp)
err = source.SaveFullCommit(fcz[7])
require.Nil(err, "%+v", err)
err = source.SaveFullCommit(fcz[8])
require.Nil(err, "%+v", err)
sh := fcz[8].SignedHeader
require.NoError(err, "%+v", err)
// sh := fcz[8].SignedHeader unused
var wg sync.WaitGroup
count = 100
@@ -266,12 +274,12 @@ func TestConcurrencyInquirerVerify(t *testing.T) {
for i := 0; i < count; i++ {
wg.Add(1)
go func(index int) {
errList[index] = cert.Verify(sh)
errList[index] = cp.UpdateToHeight(chainID, fcz[8].SignedHeader.Height)
defer wg.Done()
}(i)
}
wg.Wait()
for _, err := range errList {
require.Nil(err)
require.NoError(err)
}
}

View File

@@ -453,8 +453,6 @@ func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error
func (w *WSEvents) redoSubscriptionsAfter(d time.Duration) {
time.Sleep(d)
w.mtx.RLock()
defer w.mtx.RUnlock()
for q := range w.subscriptions {
err := w.ws.Subscribe(context.Background(), q)
if err != nil {

View File

@@ -5,7 +5,7 @@ import (
)
// TODO: better system than "unsafe" prefix
// NOTE: Amino is registered in rpc/core/types/codec.go.
// NOTE: Amino is registered in rpc/core/types/wire.go.
var Routes = map[string]*rpc.RPCFunc{
// subscribe/unsubscribe are reserved for websocket events.
"subscribe": rpc.NewWSRPCFunc(Subscribe, "query"),

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
VERSION=v12.9.0
NODE_FULL=node-${VERSION}-linux-x64
mkdir -p ~/.local/bin
mkdir -p ~/.local/node
wget http://nodejs.org/dist/${VERSION}/${NODE_FULL}.tar.gz -O ~/.local/node/${NODE_FULL}.tar.gz
tar -xzf ~/.local/node/${NODE_FULL}.tar.gz -C ~/.local/node/
ln -s ~/.local/node/${NODE_FULL}/bin/node ~/.local/bin/node
ln -s ~/.local/node/${NODE_FULL}/bin/npm ~/.local/bin/npm
export PATH=~/.local/bin:$PATH
npm i -g dredd
ln -s ~/.local/node/${NODE_FULL}/bin/dredd ~/.local/bin/dredd

View File

@@ -64,4 +64,3 @@ installFromGithub golangci/golangci-lint 7b2421d55194c9dc385eff7720a037aa9244ca3
installFromGithub petermattis/goid b0b1615b78e5ee59739545bb38426383b2cda4c9
installFromGithub sasha-s/go-deadlock d68e2bc52ae3291765881b9056f2c1527f245f1e
go get golang.org/x/tools/cmd/goimports
installFromGithub snikch/goodman 10e37e294daa3c9a90abded60ff9924bafab3888 cmd/goodman

View File

@@ -8,7 +8,7 @@
set -euo pipefail
GITIAN_CACHE_DIRNAME='.gitian-builder-cache'
GO_DEBIAN_RELEASE='1.12.8-1'
GO_DEBIAN_RELEASE='1.12.5-1'
GO_TARBALL="golang-debian-${GO_DEBIAN_RELEASE}.tar.gz"
GO_TARBALL_URL="https://salsa.debian.org/go-team/compiler/golang/-/archive/debian/${GO_DEBIAN_RELEASE}/${GO_TARBALL}"

View File

@@ -23,11 +23,11 @@ remotes:
- "url": "https://github.com/tendermint/tendermint.git"
"dir": "tendermint"
files:
- "golang-debian-1.12.8-1.tar.gz"
- "golang-debian-1.12.5-1.tar.gz"
script: |
set -e -o pipefail
GO_SRC_RELEASE=golang-debian-1.12.8-1
GO_SRC_RELEASE=golang-debian-1.12.5-1
GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz"
# Compile go and configure the environment
export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME""

View File

@@ -23,11 +23,11 @@ remotes:
- "url": "https://github.com/tendermint/tendermint.git"
"dir": "tendermint"
files:
- "golang-debian-1.12.8-1.tar.gz"
- "golang-debian-1.12.5-1.tar.gz"
script: |
set -e -o pipefail
GO_SRC_RELEASE=golang-debian-1.12.8-1
GO_SRC_RELEASE=golang-debian-1.12.5-1
GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz"
# Compile go and configure the environment
export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME""

View File

@@ -23,11 +23,11 @@ remotes:
- "url": "https://github.com/tendermint/tendermint.git"
"dir": "tendermint"
files:
- "golang-debian-1.12.8-1.tar.gz"
- "golang-debian-1.12.5-1.tar.gz"
script: |
set -e -o pipefail
GO_SRC_RELEASE=golang-debian-1.12.8-1
GO_SRC_RELEASE=golang-debian-1.12.5-1
GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz"
# Compile go and configure the environment
export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME""

View File

@@ -384,6 +384,18 @@ func (h *Header) Populate(
h.ProposerAddress = proposerAddress
}
// NOTE: While it's possible to make this faster via a custom implementation
// (or naively via a struct copy, though the struct layout isn't a frozen
// design goal yet), for now compare hashes to avoid any issues such an
// implementation might introduce.
func (h *Header) Equal(h2 *Header) bool {
h1Hash := h.Hash()
if h1Hash == nil {
panic("incomplete heaeders cannot be compared")
}
h2Hash := h2.Hash()
return bytes.Equal(h1Hash, h2Hash)
}
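A small illustrative guard (headersComparable is a hypothetical helper, not part of this change): as the panic message above indicates, Hash() returns nil when a header is missing the fields needed to compute it, so callers that may hold partially filled headers can pre-check both sides before calling Equal.
func headersComparable(h1, h2 *Header) bool {
	// A nil hash means the header is incomplete and Equal would panic on it.
	return h1 != nil && h2 != nil && h1.Hash() != nil && h2.Hash() != nil
}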
// Hash returns the hash of the header.
// It computes a Merkle tree from the header fields
// ordered as they appear in the Header.
@@ -668,6 +680,18 @@ func (commit *Commit) ValidateBasic() error {
return nil
}
// NOTE: While it's possible to make this faster via a custom implementation
// (a naive struct copy won't work due to the volatile fields), for now
// compare hashes to avoid any issues such an implementation might introduce.
func (commit *Commit) Equal(commit2 *Commit) bool {
c1Hash := commit.Hash()
if c1Hash == nil {
panic("incomplete commit cannot be compared")
}
c2Hash := commit2.Hash()
return bytes.Equal(c1Hash, c2Hash)
}
// Hash returns the hash of the commit
func (commit *Commit) Hash() cmn.HexBytes {
if commit == nil {
@@ -712,6 +736,15 @@ type SignedHeader struct {
Commit *Commit `json:"commit"`
}
// Returns true iff both the header and commit hold identical information
// (disregarding any volatile memoized fields).
// Header and Commit must be their final immutable forms, otherwise this
// function will panic.
func (sh SignedHeader) Equal(sh2 SignedHeader) bool {
return sh.Header.Equal(sh2.Header) &&
sh.Commit.Equal(sh2.Commit)
}
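Since Equal panics on input that cannot be hashed, a caller that may hold partially filled structures can wrap it; equalIfComplete below is an illustrative sketch of such a wrapper, not part of this change.
func equalIfComplete(sh1, sh2 SignedHeader) (eq bool) {
	// Convert the panic on incomplete headers/commits into a false result.
	defer func() {
		if recover() != nil {
			eq = false
		}
	}()
	return sh1.Equal(sh2)
}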
// ValidateBasic does basic consistency checks and makes sure the header
// and commit are consistent.
//

View File

@@ -41,7 +41,7 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult {
}
}
// Bytes serializes the ABCIResponse using amino
// Bytes serializes the ABCIResponse using wire
func (a ABCIResults) Bytes() []byte {
bz, err := cdc.MarshalBinaryLengthPrefixed(a)
if err != nil {

View File

@@ -596,9 +596,13 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
if vals.Size() != len(commit.Precommits) {
return NewErrInvalidCommitPrecommits(vals.Size(), len(commit.Precommits))
}
// If the height to check is different from the commit height, return an error
if height != commit.Height() {
return NewErrInvalidCommitHeight(height, commit.Height())
}
// If the blockID does not match the commit's block ID, return an error
if !blockID.Equals(commit.BlockID) {
return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v",
blockID, commit.BlockID)
@@ -607,8 +611,9 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
talliedVotingPower := int64(0)
for idx, precommit := range commit.Precommits {
// Some precommits will likely be missing, skip those
if precommit == nil {
continue // OK, some precommits can be missing.
continue
}
_, val := vals.GetByIndex(idx)
// Validate signature.
@@ -616,6 +621,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
}
// Good precommit!
if blockID.Equals(precommit.BlockID) {
talliedVotingPower += val.VotingPower
@@ -629,97 +635,13 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i
if talliedVotingPower > vals.TotalVotingPower()*2/3 {
return nil
}
return errTooMuchChange{talliedVotingPower, vals.TotalVotingPower()*2/3 + 1}
}
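A tiny worked example of the quorum rule above (hasQuorum is an illustrative helper mirroring the check, not part of this change): with integer voting powers and a total of 10, TotalVotingPower()*2/3 evaluates to 6, so a tallied power of 7 passes the strict greater-than check while 6 does not.
func hasQuorum(talliedVotingPower, totalVotingPower int64) bool {
	// Strictly more than two thirds of the total power must have precommitted
	// the block ID being verified.
	return talliedVotingPower > totalVotingPower*2/3
}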
// VerifyFutureCommit will check to see if the set would be valid with a different
// validator set.
//
// vals is the old validator set that we know. Over 2/3 of the power in old
// signed this block.
//
// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3
// can't make arbitrary state transitions. You still need > 2/3 Byzantine to
// make arbitrary state transitions.
//
// To preserve this property in the light client, we also require > 2/3 of the
// old vals to sign the future commit at H, that way we preserve the property
// that if they weren't being truthful about the validator set at H (block hash
// -> vals hash) or about the app state (block hash -> app hash) we can slash
// > 2/3. Otherwise, the lite client isn't providing the same security
// guarantees.
//
// Even if we added a slashing condition that if you sign a block header with
// the wrong validator set, then we would only need > 1/3 of signatures from
// the old vals on the new commit, it wouldn't be sufficient because the new
// vals can be arbitrary and commit some arbitrary app hash.
//
// newSet is the validator set that signed this block. Only votes from new are
// sufficient for 2/3 majority in the new set as well, for it to be a valid
// commit.
//
// NOTE: This doesn't check whether the commit is a future commit, because the
// current height isn't part of the ValidatorSet. Caller must check that the
// commit height is greater than the height for this validator set.
func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string,
blockID BlockID, height int64, commit *Commit) error {
oldVals := vals
// Commit must be a valid commit for newSet.
err := newSet.VerifyCommit(chainID, blockID, height, commit)
if err != nil {
return err
}
// Check old voting power.
oldVotingPower := int64(0)
seen := map[int]bool{}
round := commit.Round()
for idx, precommit := range commit.Precommits {
if precommit == nil {
continue
}
if precommit.Height != height {
return errors.Errorf("Blocks don't match - %d vs %d", round, precommit.Round)
}
if precommit.Round != round {
return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
}
if precommit.Type != PrecommitType {
return errors.Errorf("Invalid commit -- not precommit @ index %v", idx)
}
// See if this validator is in oldVals.
oldIdx, val := oldVals.GetByAddress(precommit.ValidatorAddress)
if val == nil || seen[oldIdx] {
continue // missing or double vote...
}
seen[oldIdx] = true
// Validate signature.
precommitSignBytes := commit.VoteSignBytes(chainID, idx)
if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
return errors.Errorf("Invalid commit -- invalid signature: %v", precommit)
}
// Good precommit!
if blockID.Equals(precommit.BlockID) {
oldVotingPower += val.VotingPower
}
// else {
// It's OK that the BlockID doesn't match. We include stray
// precommits to measure validator availability.
// }
}
if oldVotingPower <= oldVals.TotalVotingPower()*2/3 {
return errTooMuchChange{oldVotingPower, oldVals.TotalVotingPower()*2/3 + 1}
}
return nil
}
//-----------------
// ErrTooMuchChange
// IsErrTooMuchChange
func IsErrTooMuchChange(err error) bool {
_, ok := errors.Cause(err).(errTooMuchChange)
return ok

View File

@@ -20,7 +20,7 @@ const (
// Must be a string because scripts like dist.sh read this file.
// XXX: Don't change the name of this variable or you will break
// automation :)
TMCoreSemVer = "0.32.3"
TMCoreSemVer = "0.32.2"
// ABCISemVer is the semantic version of the ABCI library
ABCISemVer = "0.16.1"