Mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-15 20:41:37 +00:00)

Comparing commits: v0.29.2 ... release/v0 (37 commits)
Commits (SHA1):

4ae9b633ed
1fb900f9f5
eb7af9a765
5a7a97541b
df2f25fe82
f0c2c9ba5a
976819537d
f996b10f47
36d7180ca2
e0f8936455
f2351dc758
9d4f59b836
d2c7f8dbcf
8283ca7ddb
59cc6d36c9
af8793c01a
0b0a8b3128
7ced9e416b
af3ba5145a
cf737ec85c
d32f7d2416
dc6567c677
08dabab024
8a9eecce7f
b089587b42
7fd51e6ade
966b5bdf6e
021b5cc7f6
28d75ec801
792b12573e
4f2ef36701
6b1b595951
87bdc42bf8
90ba63948a
cce4d21ccb
c1f7399a86
44a89a3537
.circleci/config.yml

@@ -3,17 +3,20 @@ version: 2
 defaults: &defaults
   working_directory: /go/src/github.com/tendermint/tendermint
   docker:
-    - image: circleci/golang:1.11.4
+    - image: circleci/golang
   environment:
     GOBIN: /tmp/workspace/bin

 docs_update_config: &docs_update_config
   working_directory: ~/repo
   docker:
-    - image: tendermint/docs_deployment
+    - image: tendermintdev/jq_curl
   environment:
     AWS_REGION: us-east-1

+release_management_docker: &release_management_docker
+  machine: true
+
 jobs:
   setup_dependencies:
     <<: *defaults
@@ -154,7 +157,7 @@ jobs:
             for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
               id=$(basename "$pkg")

-              GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
+              go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
             done
       - persist_to_workspace:
           root: /tmp/workspace
@@ -192,7 +195,7 @@ jobs:
           name: run localnet and exit on failure
           command: |
             set -x
-            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux
+            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
             make localnet-start &
             ./scripts/localnet-blocks-test.sh 40 5 10 localhost

@@ -239,7 +242,121 @@ jobs:
       - run:
           name: Trigger website build
           command: |
-            chamber exec tendermint -- start_website_build
+            curl --silent \
+                 --show-error \
+                 -X POST \
+                 --header "Content-Type: application/json" \
+                 -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \
+                 "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json
+
+            RESULT=`jq -r '.status' response.json`
+            MESSAGE=`jq -r '.message' response.json`
+
+            if [[ ${RESULT} == "null" ]] || [[ ${RESULT} -ne "200" ]]; then
+              echo "CircleCI API call failed: $MESSAGE"
+              exit 1
+            else
+              echo "Website build started"
+            fi
+
+  prepare_build:
+    <<: *defaults
+    steps:
+      - checkout
+      - run:
+          name: Get next release number
+          command: |
+            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
+            echo "Last tag: ${LAST_TAG}"
+            if [ -z "${LAST_TAG}" ]; then
+              export LAST_TAG="${CIRCLE_BRANCH}"
+              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
+            fi
+            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
+            echo "Next tag: ${NEXT_TAG}"
+            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
+      - run:
+          name: Build dependencies
+          command: |
+            make get_tools get_vendor_deps
+      - persist_to_workspace:
+          root: .
+          paths:
+            - "release-version.source"
+      - save_cache:
+          key: v1-release-deps-{{ .Branch }}-{{ .Revision }}
+          paths:
+            - "vendor"
+
+  build_artifacts:
+    <<: *defaults
+    parallelism: 4
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - v1-release-deps-{{ .Branch }}-{{ .Revision }}
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Build artifact
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+      - persist_to_workspace:
+          root: build
+          paths:
+            - "*.zip"
+            - "tendermint_linux_amd64"
+
+  release_artifacts:
+    <<: *defaults
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Deploy to GitHub
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            echo "---"
+            ls -la /tmp/workspace/*.zip
+            echo "---"
+            python -u scripts/release_management/sha-files.py
+            echo "---"
+            cat /tmp/workspace/SHA256SUMS
+            echo "---"
+            export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
+            echo "Release ID: ${RELEASE_ID}"
+            #Todo: Parallelize uploads
+            export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
+            python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"
+
+  release_docker:
+    <<: *release_management_docker
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Deploy to Docker Hub
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
+            docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
+            docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}"
+            docker push "tendermint/tendermint"
+            docker logout
+
 workflows:
   version: 2
@@ -277,3 +394,25 @@ workflows:
       - upload_coverage:
           requires:
            - test_cover
+  release:
+    jobs:
+      - prepare_build
+      - build_artifacts:
+          requires:
+            - prepare_build
+      - release_artifacts:
+          requires:
+            - prepare_build
+            - build_artifacts
+          filters:
+            branches:
+              only:
+                - /v[0-9]+\.[0-9]+/
+      - release_docker:
+          requires:
+            - prepare_build
+            - build_artifacts
+          filters:
+            branches:
+              only:
+                - /v[0-9]+\.[0-9]+/
.github/CODEOWNERS (vendored, 4 lines changed)

@@ -4,4 +4,6 @@
 * @ebuchman @melekes @xla

 # Precious documentation
-/docs/ @zramsay
+/docs/README.md @zramsay
+/docs/DOCS_README.md @zramsay
+/docs/.vuepress/ @zramsay
.golangci.yml

@@ -18,9 +18,7 @@ linters:
  - nakedret
  - lll
  - gochecknoglobals
  - govet
  - gocritic
  - gosec
  - gochecknoinits
  - scopelint
  - stylecheck
CHANGELOG.md (120 lines changed)

@@ -1,5 +1,121 @@
 # Changelog

+## v0.30.3
+
+*April 1st, 2019*
+
+This release includes two security sensitive fixes: it ensures generated private
+keys are valid, and it prevents certain DNS lookups that would cause the node to
+panic if the lookup failed.
+
+### BUG FIXES:
+
+- [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439)
+  Ensure generated private keys are valid by randomly sampling until a valid key is found.
+  Previously, it was possible (though rare!) to generate keys that exceeded the curve order.
+  Such keys would lead to invalid signatures.
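The rejection sampling described in this entry is easy to picture. Below is a minimal, self-contained sketch of the idea only — not the actual crypto/secp256k1 code; the hard-coded curve order and helper name are illustrative (the real code uses btcec.S256().N):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// curveN is the secp256k1 group order, hard-coded only to keep the
// sketch self-contained.
var curveN, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// genPrivKey keeps sampling 32 random bytes until the scalar is a valid
// private key: non-zero and strictly less than the curve order.
func genPrivKey() []byte {
	for {
		priv := make([]byte, 32)
		if _, err := rand.Read(priv); err != nil {
			panic(err)
		}
		k := new(big.Int).SetBytes(priv)
		if k.Sign() > 0 && k.Cmp(curveN) < 0 {
			return priv
		}
	}
}

func main() {
	fmt.Printf("%x\n", genPrivKey())
}

Since an out-of-range sample is astronomically unlikely, the loop almost always finishes on the first iteration; the retry only guards the rare invalid draw the entry mentions.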
+- [p2p] [\#3522](https://github.com/tendermint/tendermint/issues/3522) Memoize
+  socket address in peer connections to avoid DNS lookups. Previously, failed
+  DNS lookups could cause the node to panic.
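A hedged sketch of the memoization idea from this entry (illustrative names, not the tendermint p2p code): resolve the address once, cache both result and error, and never perform a second lookup:

package main

import (
	"fmt"
	"net"
	"sync"
)

// peerAddr memoizes the result of a one-time address resolution so that
// later accessors never trigger another (possibly failing) DNS lookup.
type peerAddr struct {
	hostPort string
	once     sync.Once
	resolved *net.TCPAddr
	err      error
}

func (p *peerAddr) Addr() (*net.TCPAddr, error) {
	p.once.Do(func() {
		p.resolved, p.err = net.ResolveTCPAddr("tcp", p.hostPort)
	})
	return p.resolved, p.err
}

func main() {
	p := &peerAddr{hostPort: "localhost:26656"}
	addr, err := p.Addr() // resolves once; subsequent calls reuse the result
	fmt.Println(addr, err)
}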
+
+### IMPROVEMENTS:
+
+- [circle] [\#3497](https://github.com/tendermint/tendermint/issues/3497) Move release management to CircleCI
+
+## v0.30.2
+
+*March 10th, 2019*
+
+This release fixes a CLevelDB memory leak. It was happening because we were not
+closing the WriteBatch object after use. See [levigo's
+godoc](https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close) for the
+Close method. Special thanks goes to @Stumble who both reported an issue in
+[cosmos-sdk](https://github.com/cosmos/cosmos-sdk/issues/3842) and provided a
+fix here.
+
+### BREAKING CHANGES:
+
+* Go API
+  - [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble)
+
+### BUG FIXES:
+- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Fix CLevelDB memory leak (@Stumble)
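The shape of the fix is the classic create/defer-Close pairing. A minimal sketch under assumed, simplified interfaces (the real ones live in libs/db; names here are illustrative):

package db

// Batch sketches the interface after \#3842: it gains Close so
// implementations backed by C resources (levigo's WriteBatch in CLevelDB)
// can be released deterministically.
type Batch interface {
	Set(key, value []byte)
	Delete(key []byte)
	Write()
	Close() // releases the underlying WriteBatch
}

// DB is the minimal slice of the database interface this sketch needs.
type DB interface {
	NewBatch() Batch
}

// saveAll shows the calling pattern the fix enables: defer Close right
// after creating the batch, so the batch is freed even on early return.
func saveAll(db DB, kvs map[string][]byte) {
	batch := db.NewBatch()
	defer batch.Close()
	for k, v := range kvs {
		batch.Set([]byte(k), v)
	}
	batch.Write()
}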
+
+## v0.30.1
+
+*February 20th, 2019*
+
+This release fixes a consensus halt and a DataCorruptionError after restart
+discovered in `game_of_stakes_6`. It also fixes a security issue in the p2p
+handshake by authenticating the NetAddress.ID of the peer we're dialing.
+
+### IMPROVEMENTS:
+
+* [config] [\#3291](https://github.com/tendermint/tendermint/issues/3291) Make
+  config.ResetTestRootWithChainID() create concurrency-safe test directories.
+
+### BUG FIXES:
+
+* [consensus] [\#3295](https://github.com/tendermint/tendermint/issues/3295)
+  Flush WAL on stop to prevent data corruption during graceful shutdown.
+* [consensus] [\#3302](https://github.com/tendermint/tendermint/issues/3302)
+  Fix possible halt by resetting TriggeredTimeoutPrecommit before starting next height.
+* [rpc] [\#3251](https://github.com/tendermint/tendermint/issues/3251) Fix
+  `/net_info#peers#remote_ip` format. New format spec:
+  * dotted decimal ("192.0.2.1"), if ip is an IPv4 or IP4-mapped IPv6 address
+  * IPv6 ("2001:db8::1"), if ip is a valid IPv6 address
+* [cmd] [\#3314](https://github.com/tendermint/tendermint/issues/3314) Return
+  an error on `show_validator` when the private validator file does not exist.
+* [p2p] [\#3010](https://github.com/tendermint/tendermint/issues/3010#issuecomment-464287627)
+  Authenticate a peer against its NetAddress.ID when dialing.
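The remote_ip format spec in the [rpc] \#3251 entry above matches what Go's net.IP.String() already does, so a sketch is tiny (the wrapper name is illustrative):

package main

import (
	"fmt"
	"net"
)

// remoteIP renders an IP per the new spec: dotted decimal for IPv4 and
// IPv4-mapped IPv6 addresses, canonical IPv6 otherwise. net.IP.String()
// implements exactly this behavior.
func remoteIP(ip net.IP) string {
	return ip.String()
}

func main() {
	fmt.Println(remoteIP(net.ParseIP("192.0.2.1")))        // 192.0.2.1
	fmt.Println(remoteIP(net.ParseIP("::ffff:192.0.2.1"))) // 192.0.2.1 (IPv4-mapped)
	fmt.Println(remoteIP(net.ParseIP("2001:db8::1")))      // 2001:db8::1
}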
+
+## v0.30.0
+
+*February 8th, 2019*
+
+This release fixes yet another issue with the proposer selection algorithm.
+We hope it's the last one, but we won't be surprised if it's not.
+We plan to one day expose the selection algorithm more directly to
+the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)).
+For more, see issues marked
+[proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection).
+
+This release also includes a fix to prevent Tendermint from including the same
+piece of evidence in more than one block. This issue was reported by @chengwenxi in our
+[bug bounty program](https://hackerone.com/tendermint).
+
+### BREAKING CHANGES:
+
+* Apps
+  - [state] [\#3222](https://github.com/tendermint/tendermint/issues/3222)
+    Duplicate updates for the same validator are forbidden. Apps must ensure
+    that a given `ResponseEndBlock.ValidatorUpdates` contains only one entry per pubkey.
+
+* Go API
+  - [types] [\#3222](https://github.com/tendermint/tendermint/issues/3222)
+    Remove `Add` and `Update` methods from `ValidatorSet` in favor of new
+    `UpdateWithChangeSet`. This allows updates to be applied as a set, instead of
+    one at a time.
+
+* Block Protocol
+  - [state] [\#3286](https://github.com/tendermint/tendermint/issues/3286) Blocks that include already committed evidence are invalid.
+
+* P2P Protocol
+  - [consensus] [\#3222](https://github.com/tendermint/tendermint/issues/3222)
+    Validator updates are applied as a set, instead of one at a time, thus
+    impacting the proposer priority calculation. This ensures that the proposer
+    selection algorithm does not depend on the order of updates in
+    `ResponseEndBlock.ValidatorUpdates`.
+
+### IMPROVEMENTS:
+- [crypto] [\#3279](https://github.com/tendermint/tendermint/issues/3279) Use `btcec.S256().N` directly instead of hard coding a copy.
+
+### BUG FIXES:
+- [state] [\#3222](https://github.com/tendermint/tendermint/issues/3222) Fix validator set updates so they are applied as a set, rather
+  than one at a time. This makes the proposer selection algorithm independent of
+  the order of updates in `ResponseEndBlock.ValidatorUpdates`.
+- [evidence] [\#3286](https://github.com/tendermint/tendermint/issues/3286) Don't add committed evidence to evidence pool.
+
 ## v0.29.2

 *February 7th, 2019*
@@ -11,7 +127,7 @@ Special thanks to external contributors on this release:
 `crypto` packages:
 - p2p:
   - Partial fix for MITM attacks on the p2p connection. MITM conditions may
-    still exist. See \#3010.
+    still exist. See [\#3010](https://github.com/tendermint/tendermint/issues/3010).
 - crypto:
   - Eliminate our fork of `btcd` and use the `btcd/btcec` library directly for
     native secp256k1 signing. Note we still modify the signature encoding to
@@ -42,7 +158,7 @@ Special thanks to external contributors on this release:
 - [p2p] [\#3247](https://github.com/tendermint/tendermint/issues/3247) Fix panic in SeedMode when calling FlushStop and OnStop
   concurrently
 - [p2p] [\#3040](https://github.com/tendermint/tendermint/issues/3040) Fix MITM on secret connection by checking low-order points
-- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket
+- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket that was causing messages to be corrupted

 ## v0.29.1
CHANGELOG_PENDING.md

@@ -1,4 +1,4 @@
-## v0.30
+## v0.31.0

 **

@@ -6,8 +6,20 @@ Special thanks to external contributors on this release:

 ### BREAKING CHANGES:

+* CLI/RPC/Config
+
+* Apps
+
+* Go API
+
+* Blockchain Protocol
+
+* P2P Protocol
+
 ### FEATURES:

 ### IMPROVEMENTS:

+- [CircleCI] \#3497 Move release management to CircleCI
+
 ### BUG FIXES:
DOCKER/Dockerfile

@@ -1,5 +1,5 @@
-FROM alpine:3.7
-MAINTAINER Greg Szabo <greg@tendermint.com>
+FROM alpine:3.9
+LABEL maintainer="hello@tendermint.com"

 # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
 # (unless you change `genesis_file` in config.toml). You can put your config.toml and
Makefile (11 lines changed)

@@ -6,6 +6,7 @@ GOTOOLS = \
 	github.com/square/certstrap
 GOBIN?=${GOPATH}/bin
 PACKAGES=$(shell go list ./...)
+OUTPUT?=build/tendermint

 INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
 BUILD_TAGS?='tendermint'
@@ -19,13 +20,13 @@ check: check_tools get_vendor_deps
 ### Build Tendermint

 build:
-	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/

 build_c:
-	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/

 build_race:
-	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
+	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint

 install:
 	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
@@ -109,7 +110,7 @@ draw_deps:

 get_deps_bin_size:
 	@# Copy of build recipe with additional flags to perform binary size analysis
-	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
+	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
 	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
 	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
@@ -261,7 +262,7 @@ check_dep:
 ### Docker image

 build-docker:
-	cp build/tendermint DOCKER/tendermint
+	cp $(OUTPUT) DOCKER/tendermint
 	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
 	rm -rf DOCKER/tendermint
UPGRADING.md (23 lines changed)

@@ -3,6 +3,29 @@
 This guide provides steps to be followed when you upgrade your applications to
 a newer version of Tendermint Core.

+## v0.30.0
+
+This release contains a breaking change to both the block and p2p protocols,
+however it may be compatible with blockchains created with v0.29.0 depending on
+the chain history. If your blockchain has not included any pieces of evidence,
+or no piece of evidence has been included in more than one block,
+and if your application has never returned multiple updates
+for the same validator in a single block, then v0.30.0 will work fine with
+blockchains created with v0.29.0.
+
+The p2p protocol change is to fix the proposer selection algorithm again.
+Note that proposer selection is purely a p2p concern right
+now since the algorithm is only relevant during real time consensus.
+This change is thus compatible with v0.29.0, but
+all nodes must be upgraded to avoid disagreements on the proposer.
+
+### Applications
+
+Applications must ensure they do not return duplicates in
+`ResponseEndBlock.ValidatorUpdates`. A pubkey must only appear once per set of
+updates. Duplicates will cause irrecoverable failure. If you have a very good
+reason why we shouldn't do this, please open an issue.
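A minimal sketch of the duplicate check an application could run before returning its updates; the ValidatorUpdate type here is a stand-in for the ABCI one, not the real abci/types definition:

package main

import "fmt"

// ValidatorUpdate stands in for the ABCI type; only the pubkey bytes
// matter for this check.
type ValidatorUpdate struct {
	PubKey []byte
	Power  int64
}

// validateUpdates rejects sets that mention the same pubkey twice, which
// v0.30.0 treats as an irrecoverable failure.
func validateUpdates(updates []ValidatorUpdate) error {
	seen := make(map[string]struct{}, len(updates))
	for _, u := range updates {
		k := string(u.PubKey)
		if _, ok := seen[k]; ok {
			return fmt.Errorf("duplicate validator update for pubkey %X", u.PubKey)
		}
		seen[k] = struct{}{}
	}
	return nil
}

func main() {
	ups := []ValidatorUpdate{{PubKey: []byte{1}, Power: 10}, {PubKey: []byte{1}, Power: 0}}
	fmt.Println(validateUpdates(ups)) // duplicate validator update for pubkey 01
}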
+
 ## v0.29.0

 This release contains some breaking changes to the block and p2p protocols,
abci/client/grpc_client.go

@@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
 }

 func (cli *grpcClient) FlushAsync() *ReqRes {
@@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
 }

 func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
@@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
 }

 func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
@@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
 }

 func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
@@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
 }

 func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
@@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
 }

 func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
@@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
 }

 func (cli *grpcClient) CommitAsync() *ReqRes {
@@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
 }

 func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
@@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
 }

 func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
@@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
 }

 func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
@@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
 	if err != nil {
 		cli.StopForError(err)
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
+	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
 }

 func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
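All of the replacements above are the same mechanical change: unkeyed composite literals become keyed ones. A small standalone illustration of why keyed literals are preferred (and why go vet flags unkeyed literals of struct types from other packages):

package main

import "fmt"

type ResponseEcho struct {
	Message string
}

type Response_Echo struct {
	Echo *ResponseEcho
}

func main() {
	res := &ResponseEcho{Message: "hi"}

	// Unkeyed: breaks silently if the struct gains or reorders fields,
	// and go vet warns for types defined in other packages.
	a := &Response_Echo{res}

	// Keyed: robust to field reordering; this is the form the diff adopts.
	b := &Response_Echo{Echo: res}

	fmt.Println(a.Echo.Message, b.Echo.Message)
}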
abci/cmd/abci-cli/abci-cli.go

@@ -394,7 +394,6 @@ func cmdConsole(cmd *cobra.Command, args []string) error {
 			return err
 		}
 	}
-	return nil
 }

 func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
blockchain/reactor.go

@@ -8,7 +8,6 @@ import (

 	amino "github.com/tendermint/go-amino"

-	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
@@ -302,7 +301,7 @@ FOR_LOOP:

 			firstParts := first.MakePartSet(types.BlockPartSizeBytes)
 			firstPartsHeader := firstParts.Header()
-			firstID := types.BlockID{first.Hash(), firstPartsHeader}
+			firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
 			// Finally, verify the first block using the second's commit
 			// NOTE: we can probably make this more efficient, but note that calling
 			// first.Hash() doesn't verify the tx contents, so MakePartSet() is
@@ -338,8 +337,7 @@ FOR_LOOP:
 				state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
 				if err != nil {
 					// TODO This is bad, are we zombie?
-					cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
-						first.Height, first.Hash(), err))
+					panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
 				}
 				blocksSynced++
blockchain/reactor_test.go

@@ -1,6 +1,7 @@
 package blockchain

 import (
+	"os"
 	"sort"
 	"testing"
 	"time"
@@ -95,13 +96,13 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals

 	// let's add some blocks in
 	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
-		lastCommit := &types.Commit{}
+		lastCommit := types.NewCommit(types.BlockID{}, nil)
 		if blockHeight > 1 {
 			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
 			lastBlock := blockStore.LoadBlock(blockHeight - 1)

 			vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig()
-			lastCommit = &types.Commit{Precommits: []*types.CommitSig{vote}, BlockID: lastBlockMeta.BlockID}
+			lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
 		}

 		thisBlock := makeBlock(blockHeight, state, lastCommit)
@@ -125,6 +126,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals

 func TestNoBlockResponse(t *testing.T) {
 	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

 	maxBlockHeight := int64(65)
@@ -184,6 +186,7 @@ func TestNoBlockResponse(t *testing.T) {
 // that seems extreme.
 func TestBadBlockStopsPeer(t *testing.T) {
 	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

 	maxBlockHeight := int64(148)
blockchain/store_test.go

@@ -3,6 +3,7 @@ package blockchain
 import (
 	"bytes"
 	"fmt"
+	"os"
 	"runtime/debug"
 	"strings"
 	"testing"
@@ -21,16 +22,17 @@ import (
 	tmtime "github.com/tendermint/tendermint/types/time"
 )

+// A cleanupFunc cleans up any config / test files created for a particular
+// test.
+type cleanupFunc func()
+
 // make a Commit with a single vote containing just the height and a timestamp
 func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
-	return &types.Commit{
-		Precommits: []*types.CommitSig{
-			{Height: height, Timestamp: timestamp},
-		},
-	}
+	commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}}
+	return types.NewCommit(types.BlockID{}, commitSigs)
 }

-func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
+func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
 	config := cfg.ResetTestRoot("blockchain_reactor_test")
 	// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
 	// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
@@ -40,7 +42,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
 	if err != nil {
 		panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
 	}
-	return state, NewBlockStore(blockDB)
+	return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
 }

 func TestLoadBlockStoreStateJSON(t *testing.T) {
@@ -90,19 +92,32 @@ func freshBlockStore() (*BlockStore, db.DB) {
 }

 var (
-	state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
-
-	block       = makeBlock(1, state, new(types.Commit))
-	partSet     = block.MakePartSet(2)
-	part1       = partSet.GetPart(0)
-	part2       = partSet.GetPart(1)
-	seenCommit1 = makeTestCommit(10, tmtime.Now())
+	state       sm.State
+	block       *types.Block
+	partSet     *types.PartSet
+	part1       *types.Part
+	part2       *types.Part
+	seenCommit1 *types.Commit
 )

+func TestMain(m *testing.M) {
+	var cleanup cleanupFunc
+	state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	block = makeBlock(1, state, new(types.Commit))
+	partSet = block.MakePartSet(2)
+	part1 = partSet.GetPart(0)
+	part2 = partSet.GetPart(1)
+	seenCommit1 = makeTestCommit(10, tmtime.Now())
+	code := m.Run()
+	cleanup()
+	os.Exit(code)
+}
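The move from package-level var initialization to TestMain above is the standard Go pattern for shared test fixtures with cleanup — os.Exit skips deferred calls, so cleanup must run explicitly before it. A generic, self-contained sketch of the same pattern (illustrative names):

package example

import (
	"io/ioutil"
	"os"
	"testing"
)

var fixtureDir string // package-level fixture shared by the tests

// TestMain initializes fixtures once, runs all tests, and cleans up
// afterwards. os.Exit would skip defers, so cleanup runs explicitly
// before it, exactly as in the blockchain store tests above.
func TestMain(m *testing.M) {
	var err error
	fixtureDir, err = ioutil.TempDir("", "store_test_")
	if err != nil {
		panic(err)
	}
	code := m.Run()
	os.RemoveAll(fixtureDir)
	os.Exit(code)
}

func TestFixtureExists(t *testing.T) {
	if _, err := os.Stat(fixtureDir); err != nil {
		t.Fatal(err)
	}
}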
 // TODO: This test should be simplified ...

 func TestBlockStoreSaveLoadBlock(t *testing.T) {
-	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	defer cleanup()
 	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")

 	// check there are no blocks at various heights
@@ -353,7 +368,8 @@ func TestLoadBlockMeta(t *testing.T) {
 }

 func TestBlockFetchAtHeight(t *testing.T) {
-	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
+	defer cleanup()
 	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
 	block := makeBlock(bs.Height()+1, state, new(types.Commit))
cmd/tendermint/commands/reset_priv_validator.go

@@ -61,7 +61,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
 	} else {
 		pv := privval.GenFilePV(privValKeyFile, privValStateFile)
 		pv.Save()
-		logger.Info("Generated private validator file", "file", "keyFile", privValKeyFile,
+		logger.Info("Generated private validator file", "keyFile", privValKeyFile,
 			"stateFile", privValStateFile)
 	}
 }
cmd/tendermint/commands/run_node.go

@@ -77,8 +77,6 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {

 			// Run forever
 			select {}
-
-			return nil
 		},
 	}
cmd/tendermint/commands/show_node_id.go

@@ -16,12 +16,11 @@ var ShowNodeIDCmd = &cobra.Command{
 }

 func showNodeID(cmd *cobra.Command, args []string) error {
-
 	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
 	if err != nil {
 		return err
 	}
-	fmt.Println(nodeKey.ID())

+	fmt.Println(nodeKey.ID())
 	return nil
 }
cmd/tendermint/commands/show_validator.go

@@ -3,8 +3,10 @@ package commands
 import (
 	"fmt"

+	"github.com/pkg/errors"
 	"github.com/spf13/cobra"

+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/privval"
 )
@@ -12,11 +14,21 @@ import (
 var ShowValidatorCmd = &cobra.Command{
 	Use:   "show_validator",
 	Short: "Show this node's validator info",
-	Run:   showValidator,
+	RunE:  showValidator,
 }

-func showValidator(cmd *cobra.Command, args []string) {
-	privValidator := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
-	pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
-	fmt.Println(string(pubKeyJSONBytes))
+func showValidator(cmd *cobra.Command, args []string) error {
+	keyFilePath := config.PrivValidatorKeyFile()
+	if !cmn.FileExists(keyFilePath) {
+		return fmt.Errorf("private validator file %s does not exist", keyFilePath)
+	}
+
+	pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
+	bz, err := cdc.MarshalJSON(pv.GetPubKey())
+	if err != nil {
+		return errors.Wrap(err, "failed to marshal private validator pubkey")
+	}
+
+	fmt.Println(string(bz))
+	return nil
 }
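The Run-to-RunE switch above is a general cobra pattern: returning an error lets the framework report it and exit non-zero, instead of the handler having to print-and-exit itself. A hedged, self-contained sketch (the file path is illustrative, not tendermint's actual default):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Returning an error from RunE lets cobra print it and lets callers
// observe failure through the exit code, e.g. a missing key file.
func main() {
	cmd := &cobra.Command{
		Use: "show_validator",
		RunE: func(cmd *cobra.Command, args []string) error {
			const keyFilePath = "config/priv_validator_key.json" // illustrative path
			if _, err := os.Stat(keyFilePath); err != nil {
				return fmt.Errorf("private validator file %s does not exist", keyFilePath)
			}
			fmt.Println("ok")
			return nil
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}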
config/toml.go

@@ -3,13 +3,16 @@ package config
 import (
 	"bytes"
 	"fmt"
-	"os"
+	"io/ioutil"
 	"path/filepath"
 	"text/template"

 	cmn "github.com/tendermint/tendermint/libs/common"
 )

+// DefaultDirPerm is the default permissions used when creating directories.
+const DefaultDirPerm = 0700
+
 var configTemplate *template.Template

 func init() {
@@ -24,13 +27,13 @@ func init() {
 // EnsureRoot creates the root, config, and data directories if they don't exist,
 // and panics if it fails.
 func EnsureRoot(rootDir string) {
-	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
+	if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
 		cmn.PanicSanity(err.Error())
 	}
@@ -322,29 +325,17 @@ func ResetTestRoot(testName string) *Config {
 }

 func ResetTestRootWithChainID(testName string, chainID string) *Config {
-	rootDir := os.ExpandEnv("$HOME/.tendermint_test")
-	rootDir = filepath.Join(rootDir, testName)
-	// Remove ~/.tendermint_test_bak
-	if cmn.FileExists(rootDir + "_bak") {
-		if err := os.RemoveAll(rootDir + "_bak"); err != nil {
-			cmn.PanicSanity(err.Error())
-		}
-	}
+	// create a unique, concurrency-safe test directory under os.TempDir()
+	rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
+	if err != nil {
+		panic(err)
+	}
-	// Move ~/.tendermint_test to ~/.tendermint_test_bak
-	if cmn.FileExists(rootDir) {
-		if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
-			cmn.PanicSanity(err.Error())
-		}
-	}
+	// ensure config and data subdirs are created
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
+		panic(err)
+	}
-	// Create new dir
-	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
-		cmn.PanicSanity(err.Error())
-	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
-		cmn.PanicSanity(err.Error())
-	}
-	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
-		cmn.PanicSanity(err.Error())
+	if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
+		panic(err)
 	}

 	baseConfig := DefaultBaseConfig()
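Why this makes the test roots concurrency-safe (the \#3291 improvement in the changelog above): every ioutil.TempDir call yields a distinct, randomly suffixed directory, so parallel tests can no longer collide on a shared $HOME/.tendermint_test path. A tiny demonstration:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// Each ioutil.TempDir call returns a distinct directory with a random
// suffix. (In newer Go this is os.MkdirTemp; ioutil.TempDir matches the
// Go 1.11-era code above.)
func main() {
	a, _ := ioutil.TempDir("", "mychain-TestFoo_")
	b, _ := ioutil.TempDir("", "mychain-TestFoo_")
	defer os.RemoveAll(a)
	defer os.RemoveAll(b)
	fmt.Println(a != b) // true: same prefix, unique directories
}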
config/toml_test.go

@@ -48,6 +48,7 @@ func TestEnsureTestRoot(t *testing.T) {

 	// create root dir
 	cfg := ResetTestRoot(testName)
+	defer os.RemoveAll(cfg.RootDir)
 	rootDir := cfg.RootDir

 	// make sure config is set properly
consensus/byzantine_test.go

@@ -13,10 +13,6 @@ import (
 	"github.com/tendermint/tendermint/types"
 )

-func init() {
-	config = ResetConfig("consensus_byzantine_test")
-}
-
 //----------------------------------------------
 // byzantine failures
@@ -29,7 +25,8 @@ func init() {
 func TestByzantine(t *testing.T) {
 	N := 4
 	logger := consensusLogger().With("test", "byzantine")
-	css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
+	defer cleanup()

 	// give the byzantine validator a normal ticker
 	ticker := NewTimeoutTicker()
consensus/common_test.go

@@ -37,8 +37,13 @@ const (
 	testSubscriber = "test-client"
 )

+// A cleanupFunc cleans up any config / test files created for a particular
+// test.
+type cleanupFunc func()
+
 // genesis, chain_id, priv_val
 var config *cfg.Config // NOTE: must be reset for each _test.go file
+var consensusReplayConfig *cfg.Config
 var ensureTimeout = time.Millisecond * 100

 func ensureDir(dir string, mode os.FileMode) {
@@ -124,15 +129,21 @@ func startTestRound(cs *ConsensusState, height int64, round int) {

 // Create proposal block from cs1 but sign it with vs
 func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
+	cs1.mtx.Lock()
 	block, blockParts := cs1.createProposalBlock()
+	cs1.mtx.Unlock()
 	if block == nil { // on error
 		panic("error creating proposal block")
 	}

 	// Make proposal
-	polRound, propBlockID := cs1.ValidRound, types.BlockID{block.Hash(), blockParts.Header()}
+	cs1.mtx.RLock()
+	validRound := cs1.ValidRound
+	chainID := cs1.state.ChainID
+	cs1.mtx.RUnlock()
+	polRound, propBlockID := validRound, types.BlockID{block.Hash(), blockParts.Header()}
 	proposal = types.NewProposal(height, round, polRound, propBlockID)
-	if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil {
+	if err := vs.SignProposal(chainID, proposal); err != nil {
 		panic(err)
 	}
 	return
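The decideProposal change above follows a common concurrency pattern: copy shared fields into locals while briefly holding the lock, then work with the copies. A generic sketch (illustrative struct, not the ConsensusState API):

package main

import (
	"fmt"
	"sync"
)

type state struct {
	mtx        sync.RWMutex
	validRound int
	chainID    string
}

// snapshot copies the fields it needs while holding the read lock, then
// releases it before any slow work -- the pattern the diff above applies
// to cs1.ValidRound and cs1.state.ChainID.
func (s *state) snapshot() (int, string) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.validRound, s.chainID
}

func main() {
	s := &state{validRound: 1, chainID: "test-chain"}
	round, id := s.snapshot()
	fmt.Println(round, id) // safe to use without holding the lock
}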
@@ -242,6 +253,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
 // consensus states

 func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
 	config := cfg.ResetTestRoot("consensus_state_test")
 	return newConsensusStateWithConfig(config, state, pv, app)
 }
@@ -400,7 +412,7 @@ func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
 }

 func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
-	timeoutDuration := time.Duration(timeout*3) * time.Nanosecond
+	timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
 	ensureNewEvent(timeoutCh, height, round, timeoutDuration,
 		"Timeout expired while waiting for NewTimeout event")
 }
@@ -554,14 +566,17 @@ func consensusLogger() log.Logger {
 	}).With("module", "consensus")
 }

-func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
+func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
+	appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
 	genDoc, privVals := randGenesisDoc(nValidators, false, 30)
 	css := make([]*ConsensusState, nValidators)
 	logger := consensusLogger()
+	configRootDirs := make([]string, 0, nValidators)
 	for i := 0; i < nValidators; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		for _, opt := range configOpts {
 			opt(thisConfig)
 		}
@@ -574,18 +589,26 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
-	return css
+	return css, func() {
+		for _, dir := range configRootDirs {
+			os.RemoveAll(dir)
+		}
+	}
 }

 // nPeers = nValidators + nNotValidator
-func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
+func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
+	appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
+
 	genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
 	css := make([]*ConsensusState, nPeers)
 	logger := consensusLogger()
+	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		configRootDirs = append(configRootDirs, thisConfig.RootDir)
 		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		var privVal types.PrivValidator
 		if i < nValidators {
@@ -611,7 +634,11 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 		css[i].SetTimeoutTicker(tickerFunc())
 		css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
 	}
-	return css
+	return css, func() {
+		for _, dir := range configRootDirs {
+			os.RemoveAll(dir)
+		}
+	}
 }

 func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
@@ -621,7 +648,6 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
 		}
 	}
 	panic("didnt find peer in switches")
-	return -1
 }

 //-------------------------------------------------------------------------------
@@ -699,8 +725,7 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
 	return m.c
 }

-func (mockTicker) SetLogger(log.Logger) {
-}
+func (*mockTicker) SetLogger(log.Logger) {}

 //------------------------------------
@@ -709,6 +734,9 @@ func newCounter() abci.Application {
 }

 func newPersistentKVStore() abci.Application {
-	dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore")
+	dir, err := ioutil.TempDir("", "persistent-kvstore")
+	if err != nil {
+		panic(err)
+	}
 	return kvstore.NewPersistentKVStoreApplication(dir)
 }
consensus/mempool_test.go

@@ -3,6 +3,7 @@ package consensus
 import (
 	"encoding/binary"
 	"fmt"
+	"os"
 	"testing"
 	"time"

@@ -14,10 +15,6 @@ import (
 	"github.com/tendermint/tendermint/types"
 )

-func init() {
-	config = ResetConfig("consensus_mempool_test")
-}
-
 // for testing
 func assertMempool(txn txNotifier) sm.Mempool {
 	return txn.(sm.Mempool)
@@ -25,6 +22,7 @@ func assertMempool(txn txNotifier) sm.Mempool {

 func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
+	defer os.RemoveAll(config.RootDir)
 	config.Consensus.CreateEmptyBlocks = false
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@@ -43,6 +41,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {

 func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
+	defer os.RemoveAll(config.RootDir)
 	config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
@@ -58,6 +57,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {

 func TestMempoolProgressInHigherRound(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
+	defer os.RemoveAll(config.RootDir)
 	config.Consensus.CreateEmptyBlocks = false
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
consensus/reactor.go

@@ -896,7 +896,7 @@ type PeerState struct {
 	peer   p2p.Peer
 	logger log.Logger

-	mtx   sync.Mutex             `json:"-"` // NOTE: Modify below using setters, never directly.
+	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
 	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
 	Stats *peerStateStats        `json:"stats"`       // Exposed.
 }
consensus/reactor_test.go

@@ -27,10 +27,6 @@ import (
 	"github.com/tendermint/tendermint/types"
 )

-func init() {
-	config = ResetConfig("consensus_reactor_test")
-}
-
 //----------------------------------------------
 // in-process testnets
@@ -86,7 +82,8 @@ func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuse
 // Ensure a testnet makes blocks
 func TestReactorBasic(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
 	// wait till everyone makes the first new block
@@ -116,6 +113,7 @@ func TestReactorWithEvidence(t *testing.T) {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
+		defer os.RemoveAll(thisConfig.RootDir)
 		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		app := appFunc()
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
@@ -211,16 +209,18 @@ func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
 	}
 	m.height++
 }
+func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }

 //------------------------------------

 // Ensure a testnet makes blocks when there are txs
 func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
 		func(c *cfg.Config) {
 			c.Consensus.CreateEmptyBlocks = false
 		})
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -238,7 +238,8 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 // Test we record stats about votes and block parts from other peers.
 func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
@@ -262,7 +263,8 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 func TestReactorVotingPowerChange(t *testing.T) {
 	nVals := 4
 	logger := log.TestingLogger()
-	css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	defer cleanup()
 	reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
 	defer stopConsensusNet(logger, reactors, eventBuses)
@@ -323,8 +325,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
 func TestReactorValidatorSetChanges(t *testing.T) {
 	nPeers := 7
 	nVals := 4
-	css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
-
+	css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
+	defer cleanup()
 	logger := log.TestingLogger()

 	reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
@@ -421,7 +423,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 // Check we can make blocks with skip_timeout_commit=false
 func TestReactorWithTimeoutCommit(t *testing.T) {
 	N := 4
-	css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
+	css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
+	defer cleanup()
 	// override default SkipTimeoutCommit == true for tests
 	for i := 0; i < N; i++ {
 		css[i].config.SkipTimeoutCommit = false
consensus/replay.go

@@ -334,7 +334,7 @@ func (h *Handshaker) ReplayBlocks(

 	} else if storeBlockHeight < appBlockHeight {
 		// the app should never be ahead of the store (but this is under app's control)
-		return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
+		return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}

 	} else if storeBlockHeight < stateBlockHeight {
 		// the state should never be ahead of the store (this is under tendermint's control)
consensus/replay_file.go

@@ -103,7 +103,6 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
 		}
 		pb.count++
 	}
-	return nil
 }

 //------------------------------------------------
@@ -295,7 +294,6 @@ func (pb *playback) replayConsoleLoop() int {
 			fmt.Println(pb.count)
 		}
 	}
-	return 0
 }

 //--------------------------------------------------------------------------------
consensus/replay_test.go

@@ -17,23 +17,31 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
+	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
 	auto "github.com/tendermint/tendermint/libs/autofile"
 	dbm "github.com/tendermint/tendermint/libs/db"
-	"github.com/tendermint/tendermint/version"
-
-	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
+	"github.com/tendermint/tendermint/version"
 )

-var consensusReplayConfig *cfg.Config
-
-func init() {
+func TestMain(m *testing.M) {
 	config = ResetConfig("consensus_reactor_test")
 	consensusReplayConfig = ResetConfig("consensus_replay_test")
+	configStateTest := ResetConfig("consensus_state_test")
+	configMempoolTest := ResetConfig("consensus_mempool_test")
+	configByzantineTest := ResetConfig("consensus_byzantine_test")
+	code := m.Run()
+	os.RemoveAll(config.RootDir)
+	os.RemoveAll(consensusReplayConfig.RootDir)
+	os.RemoveAll(configStateTest.RootDir)
+	os.RemoveAll(configMempoolTest.RootDir)
+	os.RemoveAll(configByzantineTest.RootDir)
+	os.Exit(code)
 }

 // These tests ensure we can always recover from failure at any part of the consensus process.
@@ -51,7 +59,8 @@ func init() {
 // and which ones we need the wal for - then we'd also be able to only flush the
 // wal writer when we need to, instead of with every message.

-func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
+func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
+	lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
 	logger := log.TestingLogger()
 	state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
 	privValidator := loadPrivValidator(consensusReplayConfig)
@@ -59,7 +68,6 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
 	cs.SetLogger(logger)

 	bytes, _ := ioutil.ReadFile(cs.config.WalFile())
-	// fmt.Printf("====== WAL: \n\r%s\n", bytes)
 	t.Logf("====== WAL: \n\r%X\n", bytes)

 	err := cs.Start()
@@ -110,21 +118,22 @@ func TestWALCrash(t *testing.T) {
 			3},
 	}

-	for _, tc := range testCases {
+	for i, tc := range testCases {
+		consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
 		t.Run(tc.name, func(t *testing.T) {
-			crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
+			crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
 		})
 	}
 }

-func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
+func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
+	initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
 	walPaniced := make(chan error)
 	crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}

 	i := 1
 LOOP:
 	for {
-		// fmt.Printf("====== LOOP %d\n", i)
+		t.Logf("====== LOOP %d\n", i)

 		// create consensus state from a clean slate
@@ -163,7 +172,7 @@ LOOP:
 		t.Logf("WAL paniced: %v", err)

 		// make sure we can make blocks after a crash
-		startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
+		startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)

 		// stop consensus state and transactions sender (initFn)
 		cs.Stop()
@@ -269,29 +278,37 @@ var modes = []uint{0, 1, 2}

 // Sync from scratch
 func TestHandshakeReplayAll(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, 0, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, 0, m)
 	}
 }

 // Sync many, not from scratch
 func TestHandshakeReplaySome(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, 1, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, 1, m)
 	}
 }

 // Sync from lagging by one
 func TestHandshakeReplayOne(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, NUM_BLOCKS-1, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
 	}
 }

 // Sync from caught up
 func TestHandshakeReplayNone(t *testing.T) {
-	for _, m := range modes {
-		testHandshakeReplay(t, NUM_BLOCKS, m)
+	for i, m := range modes {
+		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
+		defer os.RemoveAll(config.RootDir)
+		testHandshakeReplay(t, config, NUM_BLOCKS, m)
 	}
 }
@@ -311,10 +328,8 @@ func tempWALWithData(data []byte) string {
 }

 // Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
-func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
-	config := ResetConfig("proxy_test_")
-
-	walBody, err := WALWithNBlocks(NUM_BLOCKS)
+func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
+	walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
 	require.NoError(t, err)
 	walFile := tempWALWithData(walBody)
 	config.Consensus.SetWalFile(walFile)
@@ -537,10 +552,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
 		}
 		case *types.Vote:
 			if p.Type == types.PrecommitType {
-				thisBlockCommit = &types.Commit{
-					BlockID:    p.BlockID,
-					Precommits: []*types.CommitSig{p.CommitSig()},
-				}
+				commitSigs := []*types.CommitSig{p.CommitSig()}
+				thisBlockCommit = types.NewCommit(p.BlockID, commitSigs)
 			}
 		}
 	}
@@ -633,6 +646,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
 	clientCreator := proxy.NewLocalClientCreator(app)

 	config := ResetConfig("proxy_test_")
+	defer os.RemoveAll(config.RootDir)
 	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
 	stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)
@@ -566,6 +566,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
cs.CommitRound = -1
|
||||
cs.LastCommit = lastPrecommits
|
||||
cs.LastValidators = state.LastValidators
|
||||
cs.TriggeredTimeoutPrecommit = false
|
||||
|
||||
cs.state = state
|
||||
|
||||
@@ -909,7 +910,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
|
||||
}
|
||||
|
||||
// Make proposal
|
||||
propBlockId := types.BlockID{block.Hash(), blockParts.Header()}
|
||||
propBlockId := types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
|
||||
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId)
|
||||
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil {
|
||||
|
||||
@@ -954,7 +955,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
if cs.Height == 1 {
|
||||
// We're creating a proposal for the first block.
|
||||
// The commit is empty, but not nil.
|
||||
commit = &types.Commit{}
|
||||
commit = types.NewCommit(types.BlockID{}, nil)
|
||||
} else if cs.LastCommit.HasTwoThirdsMajority() {
|
||||
// Make the commit from LastCommit
|
||||
commit = cs.LastCommit.MakeCommit()
|
||||
@@ -1320,7 +1321,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
// Execute and commit the block, update and save the state, and update the mempool.
|
||||
// NOTE The block.AppHash wont reflect these txs until the next block.
|
||||
var err error
|
||||
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
|
||||
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block)
|
||||
if err != nil {
|
||||
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
|
||||
err := cmn.Kill()
|
||||
@@ -1543,7 +1544,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
}
|
||||
|
||||
cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
// if we can skip timeoutCommit and have all the votes now,
|
||||
@@ -1571,7 +1572,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
return
|
||||
}
|
||||
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
switch vote.Type {
|
||||
@@ -1683,7 +1684,7 @@ func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, heade
|
||||
Round: cs.Round,
|
||||
Timestamp: cs.voteTime(),
|
||||
Type: type_,
|
||||
BlockID: types.BlockID{hash, header},
|
||||
BlockID: types.BlockID{Hash: hash, PartsHeader: header},
|
||||
}
|
||||
err := cs.privValidator.SignVote(cs.state.ChainID, vote)
|
||||
return vote, err
|
||||
|
@@ -14,14 +14,10 @@ import (
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
|
||||
p2pmock "github.com/tendermint/tendermint/p2p/mock"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
config = ResetConfig("consensus_state_test")
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
ProposeSuite
|
||||
@@ -1288,8 +1284,8 @@ func (n *fakeTxNotifier) Notify() {
|
||||
}
|
||||
|
||||
func TestStartNextHeightCorrectly(t *testing.T) {
|
||||
config.Consensus.SkipTimeoutCommit = false
|
||||
cs1, vss := randConsensusState(4)
|
||||
cs1.config.SkipTimeoutCommit = false
|
||||
cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})}
|
||||
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
@@ -1326,13 +1322,14 @@ func TestStartNextHeightCorrectly(t *testing.T) {
|
||||
// add precommits
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
|
||||
|
||||
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.True(t, rs.TriggeredTimeoutPrecommit)
|
||||
|
||||
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
|
||||
|
||||
cs1.txNotifier.(*fakeTxNotifier).Notify()
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
@@ -1340,6 +1337,64 @@ func TestStartNextHeightCorrectly(t *testing.T) {
|
||||
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
|
||||
}
|
||||
|
||||
func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
|
||||
config.Consensus.SkipTimeoutCommit = false
|
||||
cs1, vss := randConsensusState(4)
|
||||
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, height, round)
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs := cs1.GetRoundState()
|
||||
theBlockHash := rs.ProposalBlock.Hash()
|
||||
theBlockParts := rs.ProposalBlockParts.Header()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
|
||||
signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
|
||||
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
|
||||
// add precommits
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.True(t, rs.TriggeredTimeoutPrecommit)
|
||||
|
||||
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
|
||||
|
||||
prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
|
||||
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(proposalCh, height+1, 0)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each height")
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------------------
|
||||
// SlashingSuite
|
||||
// TODO: Slashing
|
||||
@@ -1493,7 +1548,7 @@ func TestStateHalt1(t *testing.T) {
|
||||
func TestStateOutputsBlockPartsStats(t *testing.T) {
|
||||
// create dummy peer
|
||||
cs, _ := randConsensusState(1)
|
||||
peer := p2pdummy.NewPeer()
|
||||
peer := p2pmock.NewPeer(nil)
|
||||
|
||||
// 1) new block part
|
||||
parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
|
||||
@@ -1536,7 +1591,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
|
||||
func TestStateOutputVoteStats(t *testing.T) {
|
||||
cs, vss := randConsensusState(2)
|
||||
// create dummy peer
|
||||
peer := p2pdummy.NewPeer()
|
||||
peer := p2pmock.NewPeer(nil)
|
||||
|
||||
vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{})
|
||||
|
||||
|
@@ -2,6 +2,7 @@ package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
@@ -11,8 +12,11 @@ import (
|
||||
|
||||
var config *cfg.Config // NOTE: must be reset for each _test.go file
|
||||
|
||||
func init() {
|
||||
func TestMain(m *testing.M) {
|
||||
config = cfg.ResetTestRoot("consensus_height_vote_set_test")
|
||||
code := m.Run()
|
||||
os.RemoveAll(config.RootDir)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func TestPeerCatchupRounds(t *testing.T) {
|
||||
@@ -64,7 +68,6 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
|
||||
err := privVal.SignVote(chainID, vote)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error signing vote: %v", err))
|
||||
return nil
|
||||
}
|
||||
return vote
|
||||
}
|
||||
|
@@ -53,11 +53,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
Data: types.Data{
|
||||
Txs: txs,
|
||||
},
|
||||
Evidence: types.EvidenceData{},
|
||||
LastCommit: &types.Commit{
|
||||
BlockID: blockID,
|
||||
Precommits: precommits,
|
||||
},
|
||||
Evidence: types.EvidenceData{},
|
||||
LastCommit: types.NewCommit(blockID, precommits),
|
||||
}
|
||||
parts := block.MakePartSet(4096)
|
||||
// Random Proposal
|
||||
|
@@ -116,6 +116,7 @@ func (wal *baseWAL) OnStart() error {
|
||||
// Use Wait() to ensure it's finished shutting down
|
||||
// before cleaning up files.
|
||||
func (wal *baseWAL) OnStop() {
|
||||
wal.group.Flush()
|
||||
wal.group.Stop()
|
||||
wal.group.Close()
|
||||
}
|
||||
@@ -228,12 +229,17 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
|
||||
return &WALEncoder{wr}
|
||||
}
|
||||
|
||||
// Encode writes the custom encoding of v to the stream.
|
||||
// Encode writes the custom encoding of v to the stream. It returns an error if
|
||||
// the amino-encoded size of v is greater than 1MB. Any error encountered
|
||||
// during the write is also returned.
|
||||
func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
|
||||
data := cdc.MustMarshalBinaryBare(v)
|
||||
|
||||
crc := crc32.Checksum(data, crc32c)
|
||||
length := uint32(len(data))
|
||||
if length > maxMsgSizeBytes {
|
||||
return fmt.Errorf("Msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes)
|
||||
}
|
||||
totalLength := 8 + int(length)
|
||||
|
||||
msg := make([]byte, totalLength)
|
||||
@@ -306,15 +312,15 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
||||
}
|
||||
|
||||
data := make([]byte, length)
|
||||
_, err = dec.rd.Read(data)
|
||||
n, err := dec.rd.Read(data)
|
||||
if err != nil {
|
||||
return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v", err)}
|
||||
return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v (read: %d, wanted: %d)", err, n, length)}
|
||||
}
|
||||
|
||||
// check checksum before decoding data
|
||||
actualCRC := crc32.Checksum(data, crc32c)
|
||||
if actualCRC != crc {
|
||||
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
|
||||
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)}
|
||||
}
|
||||
|
||||
var res = new(TimedWALMessage) // nolint: gosimple
|
||||
|
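The decoder change above reports how many bytes were actually read when a record is truncated. Note that a bare io.Reader.Read may legitimately return fewer than len(data) bytes without an error, so a stricter variant would use io.ReadFull, which folds the (read, wanted) bookkeeping into the error itself. A hedged sketch of that alternative (readRecordData is an illustrative name, not a function in the repo):

package wal

import (
	"fmt"
	"io"
)

// readRecordData reads exactly length bytes, treating a short read as
// corruption. io.ReadFull returns io.ErrUnexpectedEOF when fewer than
// len(data) bytes arrive before EOF.
func readRecordData(rd io.Reader, length uint32) ([]byte, error) {
	data := make([]byte, length)
	if n, err := io.ReadFull(rd, data); err != nil {
		return nil, fmt.Errorf("failed to read data: %v (read: %d, wanted: %d)", err, n, length)
	}
	return data, nil
}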
@@ -7,7 +7,7 @@ import (
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"

"github.com/pkg/errors"
@@ -28,8 +28,9 @@ import (
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
config := getConfig()
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
config := getConfig(t)
defer os.RemoveAll(config.RootDir)

app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))

@@ -102,11 +103,11 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
}

//WALWithNBlocks returns a WAL content with numBlocks.
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
var b bytes.Buffer
wr := bufio.NewWriter(&b)

if err := WALGenerateNBlocks(wr, numBlocks); err != nil {
if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
return []byte{}, err
}

@@ -114,18 +115,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
return b.Bytes(), nil
}

// f**ing long, but unique for each test
func makePathname() string {
// get path
p, err := os.Getwd()
if err != nil {
panic(err)
}
// fmt.Println(p)
sep := string(filepath.Separator)
return strings.Replace(p, sep, "_", -1)
}

func randPort() int {
// returns between base and base + spread
base, spread := 20000, 20000
@@ -140,9 +129,8 @@ func makeAddrs() (string, string, string) {
}

// getConfig returns a config for test cases
func getConfig() *cfg.Config {
pathname := makePathname()
c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt()))
func getConfig(t *testing.T) *cfg.Config {
c := cfg.ResetTestRoot(t.Name())

// and we use random ports to run in parallel
tm, rpc, grpc := makeAddrs()

@@ -48,7 +48,7 @@ func TestWALTruncate(t *testing.T) {

//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
//at this time, RotateFile is called, truncate content exist in each file.
err = WALGenerateNBlocks(wal.Group(), 60)
err = WALGenerateNBlocks(t, wal.Group(), 60)
require.NoError(t, err)

time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run
@@ -95,8 +95,28 @@ func TestWALEncoderDecoder(t *testing.T) {
}
}

func TestWALWritePanicsIfMsgIsTooBig(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)
walFile := filepath.Join(walDir, "wal")

wal, err := NewWAL(walFile)
require.NoError(t, err)
err = wal.Start()
require.NoError(t, err)
defer func() {
wal.Stop()
// wait for the wal to finish shutting down so we
// can safely remove the directory
wal.Wait()
}()

assert.Panics(t, func() { wal.Write(make([]byte, maxMsgSizeBytes+1)) })
}

func TestWALSearchForEndHeight(t *testing.T) {
walBody, err := WALWithNBlocks(6)
walBody, err := WALWithNBlocks(t, 6)
if err != nil {
t.Fatal(err)
}

@@ -47,6 +47,8 @@ func checkAminoJSON(t *testing.T, src interface{}, dst interface{}, isNil bool)
require.Nil(t, err, "%+v", err)
}

// ExamplePrintRegisteredTypes refers to unknown identifier: PrintRegisteredTypes
//nolint:govet
func ExamplePrintRegisteredTypes() {
cdc.PrintTypes(os.Stdout)
// Output: | Type | Name | Prefix | Length | Notes |

@@ -1,6 +1,8 @@
package merkle

import (
// it is ok to use math/rand here: we do not need a cryptographically secure random
// number generator here and we can run the tests a bit faster
"math/rand"
"testing"

@@ -24,7 +26,7 @@ func TestKeyPath(t *testing.T) {
keys[i][j] = alphanum[rand.Intn(len(alphanum))]
}
case KeyEncodingHex:
rand.Read(keys[i])
rand.Read(keys[i]) //nolint: gosec
default:
panic("Unexpected encoding")
}

@@ -6,6 +6,7 @@ import (
"crypto/subtle"
"fmt"
"io"
"math/big"

"golang.org/x/crypto/ripemd160"

@@ -65,32 +66,61 @@ func (privKey PrivKeySecp256k1) Equals(other crypto.PrivKey) bool {
}

// GenPrivKey generates a new ECDSA private key on curve secp256k1 private key.
// It uses OS randomness in conjunction with the current global random seed
// in tendermint/libs/common to generate the private key.
// It uses OS randomness to generate the private key.
func GenPrivKey() PrivKeySecp256k1 {
return genPrivKey(crypto.CReader())
}

// genPrivKey generates a new secp256k1 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKeySecp256k1 {
privKeyBytes := [32]byte{}
_, err := io.ReadFull(rand, privKeyBytes[:])
if err != nil {
panic(err)
var privKeyBytes [32]byte
d := new(big.Int)
for {
privKeyBytes = [32]byte{}
_, err := io.ReadFull(rand, privKeyBytes[:])
if err != nil {
panic(err)
}

d.SetBytes(privKeyBytes[:])
// break if we found a valid point (i.e. > 0 and < N == curverOrder)
isValidFieldElement := 0 < d.Sign() && d.Cmp(secp256k1.S256().N) < 0
if isValidFieldElement {
break
}
}
// crypto.CRandBytes is guaranteed to be 32 bytes long, so it can be
// casted to PrivKeySecp256k1.

return PrivKeySecp256k1(privKeyBytes)
}

var one = new(big.Int).SetInt64(1)

// GenPrivKeySecp256k1 hashes the secret with SHA2, and uses
// that 32 byte output to create the private key.
//
// It makes sure the private key is a valid field element by setting:
//
// c = sha256(secret)
// k = (c mod (n − 1)) + 1, where n = curve order.
//
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 {
privKey32 := sha256.Sum256(secret)
// sha256.Sum256() is guaranteed to be 32 bytes long, so it can be
// casted to PrivKeySecp256k1.
secHash := sha256.Sum256(secret)
// to guarantee that we have a valid field element, we use the approach of:
// "Suite B Implementer’s Guide to FIPS 186-3", A.2.1
// https://apps.nsa.gov/iaarchive/library/ia-guidance/ia-solutions-for-classified/algorithm-guidance/suite-b-implementers-guide-to-fips-186-3-ecdsa.cfm
// see also https://github.com/golang/go/blob/0380c9ad38843d523d9c9804fe300cb7edd7cd3c/src/crypto/ecdsa/ecdsa.go#L89-L101
fe := new(big.Int).SetBytes(secHash[:])
n := new(big.Int).Sub(secp256k1.S256().N, one)
fe.Mod(fe, n)
fe.Add(fe, one)

feB := fe.Bytes()
var privKey32 [32]byte
// copy feB over to fixed 32 byte privKey32 and pad (if necessary)
copy(privKey32[32-len(feB):32], feB)

return PrivKeySecp256k1(privKey32)
}
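The k = (c mod (n − 1)) + 1 mapping above always lands in [1, n−1]: c mod (n−1) is at most n−2, so adding one can never reach n, and the result can never be zero. A small standalone check of that arithmetic (illustrative only; the curve order is written out as a constant to avoid importing the btcec package):

package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
)

func main() {
	// secp256k1 curve order n, the same value the diff takes from
	// secp256k1.S256().N.
	n, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
	one := big.NewInt(1)

	c := sha256.Sum256([]byte("some secret"))
	k := new(big.Int).SetBytes(c[:])

	k.Mod(k, new(big.Int).Sub(n, one)) // k is now in [0, n-2]
	k.Add(k, one)                      // k is now in [1, n-1]: a valid field element

	fmt.Println(k.Sign() > 0 && k.Cmp(n) < 0) // prints: true
}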
crypto/secp256k1/secp256k1_cgo_test.go (new file, 39 lines)
@@ -0,0 +1,39 @@
// +build libsecp256k1

package secp256k1

import (
"github.com/magiconair/properties/assert"
"testing"

"github.com/stretchr/testify/require"
)

func TestPrivKeySecp256k1SignVerify(t *testing.T) {
msg := []byte("A.1.2 ECC Key Pair Generation by Testing Candidates")
priv := GenPrivKey()
tests := []struct {
name string
privKey PrivKeySecp256k1
wantSignErr bool
wantVerifyPasses bool
}{
{name: "valid sign-verify round", privKey: priv, wantSignErr: false, wantVerifyPasses: true},
{name: "invalid private key", privKey: [32]byte{}, wantSignErr: true, wantVerifyPasses: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.privKey.Sign(msg)
if tt.wantSignErr {
require.Error(t, err)
t.Logf("Got error: %s", err)
return
}
require.NoError(t, err)
require.NotNil(t, got)

pub := tt.privKey.PubKey()
assert.Equal(t, tt.wantVerifyPasses, pub.VerifyBytes(msg, got))
})
}
}

crypto/secp256k1/secp256k1_internal_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package secp256k1

import (
"bytes"
"math/big"
"testing"

"github.com/stretchr/testify/require"

underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
)

func Test_genPrivKey(t *testing.T) {

empty := make([]byte, 32)
oneB := big.NewInt(1).Bytes()
onePadded := make([]byte, 32)
copy(onePadded[32-len(oneB):32], oneB)
t.Logf("one padded: %v, len=%v", onePadded, len(onePadded))

validOne := append(empty, onePadded...)
tests := []struct {
name string
notSoRand []byte
shouldPanic bool
}{
{"empty bytes (panics because 1st 32 bytes are zero and 0 is not a valid field element)", empty, true},
{"curve order: N", underlyingSecp256k1.S256().N.Bytes(), true},
{"valid because 0 < 1 < N", validOne, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.shouldPanic {
require.Panics(t, func() {
genPrivKey(bytes.NewReader(tt.notSoRand))
})
return
}
got := genPrivKey(bytes.NewReader(tt.notSoRand))
fe := new(big.Int).SetBytes(got[:])
require.True(t, fe.Cmp(underlyingSecp256k1.S256().N) < 0)
require.True(t, fe.Sign() > 0)
})
}
}

@@ -14,8 +14,7 @@ import (
// see:
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
var secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
var secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)

// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
// The returned signature will be of the form R || S (in lower-S form).
@@ -53,8 +52,8 @@ func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool {
// that len(sigStr) == 64.
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
return &secp256k1.Signature{
new(big.Int).SetBytes(sigStr[:32]),
new(big.Int).SetBytes(sigStr[32:64]),
R: new(big.Int).SetBytes(sigStr[:32]),
S: new(big.Int).SetBytes(sigStr[32:64]),
}
}

@@ -6,7 +6,6 @@ import (
"testing"

secp256k1 "github.com/btcsuite/btcd/btcec"

"github.com/stretchr/testify/require"
)

@@ -2,6 +2,7 @@ package secp256k1_test

import (
"encoding/hex"
"math/big"
"testing"

"github.com/btcsuite/btcutil/base58"
@@ -84,3 +85,28 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) {
require.Equal(t, privKeyBytes[:], serializedBytes)
}
}

func TestGenPrivKeySecp256k1(t *testing.T) {
// curve oder N
N := underlyingSecp256k1.S256().N
tests := []struct {
name string
secret []byte
}{
{"empty secret", []byte{}},
{"some long secret", []byte("We live in a society exquisitely dependent on science and technology, in which hardly anyone knows anything about science and technology.")},
{"another seed used in cosmos tests #1", []byte{0}},
{"another seed used in cosmos tests #2", []byte("mySecret")},
{"another seed used in cosmos tests #3", []byte("")},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret)
require.NotNil(t, gotPrivKey)
// interpret as a big.Int and make sure it is a valid field element:
fe := new(big.Int).SetBytes(gotPrivKey[:])
require.True(t, fe.Cmp(N) < 0)
require.True(t, fe.Sign() > 0)
})
}
}

@@ -171,6 +171,10 @@ Note that the maximum total power of the validator set is bounded by
they do not make changes to the validator set that cause it to exceed this
limit.

Additionally, applications must ensure that a single set of updates does not contain any duplicates -
a given public key can only appear in an update once. If an update includes
duplicates, the block execution will fail irrecoverably.
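To avoid the irrecoverable failure described above, an application can check its update set for repeated public keys before returning it from EndBlock or InitChain. A hedged sketch (ValidatorUpdate here is a minimal stand-in for the ABCI message, not the generated type):

package main

import "fmt"

// ValidatorUpdate is a minimal stand-in for the ABCI message: a public
// key plus the validator's new voting power.
type ValidatorUpdate struct {
	PubKey []byte
	Power  int64
}

// validateNoDuplicates returns an error if any public key appears more
// than once in a single set of updates.
func validateNoDuplicates(updates []ValidatorUpdate) error {
	seen := make(map[string]struct{}, len(updates))
	for _, u := range updates {
		k := string(u.PubKey)
		if _, ok := seen[k]; ok {
			return fmt.Errorf("duplicate validator update for pubkey %X", u.PubKey)
		}
		seen[k] = struct{}{}
	}
	return nil
}

func main() {
	ups := []ValidatorUpdate{
		{PubKey: []byte{0x01}, Power: 10},
		{PubKey: []byte{0x01}, Power: 0}, // duplicate: would fail block execution
	}
	fmt.Println(validateNoDuplicates(ups))
}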
### InitChain

ResponseInitChain can return a list of validators.

@@ -28,7 +28,8 @@ type EvidencePool struct {
state sm.State
}

func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool {
func NewEvidencePool(stateDB, evidenceDB dbm.DB) *EvidencePool {
evidenceStore := NewEvidenceStore(evidenceDB)
evpool := &EvidencePool{
stateDB: stateDB,
state: sm.LoadState(stateDB),
@@ -132,6 +133,12 @@ func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []typ

}

// IsCommitted returns true if we have already seen this exact evidence and it is already marked as committed.
func (evpool *EvidencePool) IsCommitted(evidence types.Evidence) bool {
ei := evpool.evidenceStore.getEvidenceInfo(evidence)
return ei.Evidence != nil && ei.Committed
}

func (evpool *EvidencePool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) {
for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
ev := e.Value.(types.Evidence)

@@ -56,8 +56,8 @@ func TestEvidencePool(t *testing.T) {
valAddr := []byte("val1")
height := int64(5)
stateDB := initializeValidatorState(valAddr, height)
store := NewEvidenceStore(dbm.NewMemDB())
pool := NewEvidencePool(stateDB, store)
evidenceDB := dbm.NewMemDB()
pool := NewEvidencePool(stateDB, evidenceDB)

goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr)
badEvidence := types.MockBadEvidence{goodEvidence}
@@ -84,3 +84,24 @@ func TestEvidencePool(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, 1, pool.evidenceList.Len())
}

func TestEvidencePoolIsCommitted(t *testing.T) {
// Initialization:
valAddr := []byte("validator_address")
height := int64(42)
stateDB := initializeValidatorState(valAddr, height)
evidenceDB := dbm.NewMemDB()
pool := NewEvidencePool(stateDB, evidenceDB)

// evidence not seen yet:
evidence := types.NewMockGoodEvidence(height, 0, valAddr)
assert.False(t, pool.IsCommitted(evidence))

// evidence seen but not yet committed:
assert.NoError(t, pool.AddEvidence(evidence))
assert.False(t, pool.IsCommitted(evidence))

// evidence seen and committed:
pool.MarkEvidenceAsCommitted(height, []types.Evidence{evidence})
assert.True(t, pool.IsCommitted(evidence))
}

@@ -37,8 +37,8 @@ func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*Ev
logger := evidenceLogger()
for i := 0; i < N; i++ {

store := NewEvidenceStore(dbm.NewMemDB())
pool := NewEvidencePool(stateDBs[i], store)
evidenceDB := dbm.NewMemDB()
pool := NewEvidencePool(stateDBs[i], evidenceDB)
reactors[i] = NewEvidenceReactor(pool)
reactors[i].SetLogger(logger.With("validator", i))
}

@@ -117,32 +117,33 @@ func (store *EvidenceStore) listEvidence(prefixKey string, maxNum int64) (eviden
return evidence
}

// GetEvidence fetches the evidence with the given height and hash.
func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo {
// GetEvidenceInfo fetches the EvidenceInfo with the given height and hash.
// If not found, ei.Evidence is nil.
func (store *EvidenceStore) GetEvidenceInfo(height int64, hash []byte) EvidenceInfo {
key := keyLookupFromHeightAndHash(height, hash)
val := store.db.Get(key)

if len(val) == 0 {
return nil
return EvidenceInfo{}
}
var ei EvidenceInfo
err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
}
return &ei
return ei
}

// AddNewEvidence adds the given evidence to the database.
// It returns false if the evidence is already stored.
func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int64) bool {
// check if we already have seen it
ei_ := store.GetEvidence(evidence.Height(), evidence.Hash())
if ei_ != nil && ei_.Evidence != nil {
ei := store.getEvidenceInfo(evidence)
if ei.Evidence != nil {
return false
}

ei := EvidenceInfo{
ei = EvidenceInfo{
Committed: false,
Priority: priority,
Evidence: evidence,
@@ -165,6 +166,11 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int
// MarkEvidenceAsBroadcasted removes evidence from Outqueue.
func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) {
ei := store.getEvidenceInfo(evidence)
if ei.Evidence == nil {
// nothing to do; we did not store the evidence yet (AddNewEvidence):
return
}
// remove from the outqueue
key := keyOutqueue(evidence, ei.Priority)
store.db.Delete(key)
}
@@ -177,8 +183,12 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
pendingKey := keyPending(evidence)
store.db.Delete(pendingKey)

ei := store.getEvidenceInfo(evidence)
ei.Committed = true
// committed EvidenceInfo doens't need priority
ei := EvidenceInfo{
Committed: true,
Evidence: evidence,
Priority: 0,
}

lookupKey := keyLookup(evidence)
store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei))
@@ -187,13 +197,7 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
//---------------------------------------------------
// utils

// getEvidenceInfo is convenience for calling GetEvidenceInfo if we have the full evidence.
func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInfo {
key := keyLookup(evidence)
var ei EvidenceInfo
b := store.db.Get(key)
err := cdc.UnmarshalBinaryBare(b, &ei)
if err != nil {
panic(err)
}
return ei
return store.GetEvidenceInfo(evidence.Height(), evidence.Hash())
}

@@ -27,6 +27,21 @@ func TestStoreAddDuplicate(t *testing.T) {
assert.False(added)
}

func TestStoreCommitDuplicate(t *testing.T) {
assert := assert.New(t)

db := dbm.NewMemDB()
store := NewEvidenceStore(db)

priority := int64(10)
ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))

store.MarkEvidenceAsCommitted(ev)

added := store.AddNewEvidence(ev, priority)
assert.False(added)
}

func TestStoreMark(t *testing.T) {
assert := assert.New(t)

@@ -46,7 +61,7 @@ func TestStoreMark(t *testing.T) {
assert.True(added)

// get the evidence. verify. should be uncommitted
ei := store.GetEvidence(ev.Height(), ev.Hash())
ei := store.GetEvidenceInfo(ev.Height(), ev.Hash())
assert.Equal(ev, ei.Evidence)
assert.Equal(priority, ei.Priority)
assert.False(ei.Committed)
@@ -72,9 +87,10 @@ func TestStoreMark(t *testing.T) {
assert.Equal(0, len(pendingEv))

// evidence should show committed
ei = store.GetEvidence(ev.Height(), ev.Hash())
newPriority := int64(0)
ei = store.GetEvidenceInfo(ev.Height(), ev.Hash())
assert.Equal(ev, ei.Evidence)
assert.Equal(priority, ei.Priority)
assert.Equal(newPriority, ei.Priority)
assert.True(ei.Committed)
}

@@ -412,6 +412,6 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error {
bA2.SetIndex(i, true)
}
}
*bA = *bA2
*bA = *bA2 //nolint:govet
return nil
}

@@ -179,6 +179,11 @@ func (mBatch *cLevelDBBatch) WriteSync() {
}
}

// Implements Batch.
func (mBatch *cLevelDBBatch) Close() {
mBatch.batch.Close()
}

//----------------------------------------
// Iterator
// NOTE This is almost identical to db/go_level_db.Iterator

@@ -250,3 +250,8 @@ func (dbch debugBatch) WriteSync() {
fmt.Printf("%v.batch.WriteSync()\n", dbch.label)
dbch.bch.WriteSync()
}

// Implements Batch.
func (dbch debugBatch) Close() {
dbch.bch.Close()
}

@@ -184,6 +184,10 @@ func (mBatch *goLevelDBBatch) WriteSync() {
}
}

// Implements Batch.
// Close is no-op for goLevelDBBatch.
func (mBatch *goLevelDBBatch) Close() {}

//----------------------------------------
// Iterator
// NOTE This is almost identical to db/c_level_db.Iterator

@@ -46,6 +46,10 @@ func (mBatch *memBatch) WriteSync() {
mBatch.write(true)
}

func (mBatch *memBatch) Close() {
mBatch.ops = nil
}

func (mBatch *memBatch) write(doSync bool) {
if mtx := mBatch.db.Mutex(); mtx != nil {
mtx.Lock()

@@ -248,6 +248,10 @@ func (pb prefixBatch) WriteSync() {
pb.source.WriteSync()
}

func (pb prefixBatch) Close() {
pb.source.Close()
}

//----------------------------------------
// prefixIterator

@@ -11,7 +11,7 @@ remotedb's RemoteDB implements db.DB so can be used normally
like other databases. One just has to explicitly connect to the
remote database with a client setup such as:

client, err := remotedb.NewInsecure(addr)
client, err := remotedb.NewRemoteDB(addr, cert)
// Make sure to invoke InitRemote!
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
log.Fatalf("Failed to initialize the remote db")
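Because RemoteDB satisfies the db.DB interface, a connected client is used like any local backend. A slightly fuller sketch of the flow in the doc snippet above (the address and certificate path are placeholders, and the Set/Get calls are assumed from the db.DB interface rather than taken from this page):

package main

import (
	"log"

	"github.com/tendermint/tendermint/libs/db/remotedb"
)

func main() {
	// addr and cert are placeholders for a running remotedb server.
	client, err := remotedb.NewRemoteDB("localhost:5678", "server.crt")
	if err != nil {
		log.Fatalf("Failed to connect: %v", err)
	}
	if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
		log.Fatalf("Failed to initialize the remote db")
	}

	// From here on the client behaves like any local db.DB backend.
	client.Set([]byte("key"), []byte("value"))
	value := client.Get([]byte("key"))
	log.Printf("value: %s", value)
}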
@@ -7,14 +7,6 @@ import (
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)

// Security defines how the client will talk to the gRPC server.
type Security uint

const (
Insecure Security = iota
Secure
)

// NewClient creates a gRPC client connected to the bound gRPC server at serverAddr.
// Use kind to set the level of security to either Secure or Insecure.
func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {

@@ -180,6 +180,7 @@ func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.N

func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) {
bat := s.db.NewBatch()
defer bat.Close()
for _, op := range b.Ops {
switch op.Type {
case protodb.Operation_SET:

@@ -260,3 +260,7 @@ func (bat *batch) WriteSync() {
panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err))
}
}

func (bat *batch) Close() {
bat.ops = nil
}

@@ -14,7 +14,7 @@ import (
func TestRemoteDB(t *testing.T) {
cert := "test.crt"
key := "test.key"
ln, err := net.Listen("tcp", "0.0.0.0:0")
ln, err := net.Listen("tcp", "localhost:0")
require.Nil(t, err, "expecting a port to have been assigned on which we can listen")
srv, err := grpcdb.NewServer(cert, key)
require.Nil(t, err)

@@ -1,25 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIEQTCCAimgAwIBAgIRANqF1HD19i/uvQ3n62TAKTwwDQYJKoZIhvcNAQELBQAw
GTEXMBUGA1UEAxMOdGVuZGVybWludC5jb20wHhcNMTgwNzAyMDMwNzMyWhcNMjAw
MTAyMDMwNzMwWjANMQswCQYDVQQDEwI6OjCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAOuWUMCSzYJmvKU1vsouDTe7OxnPWO3oV0FjSH8vKYoi2zpZQX35
dQDPtLDF2/v/ANZJ5pzMJR8yMMtEQ4tWxKuGzJw1ZgTgHtASPbj/M5fDnDO7Hqg4
D09eLTkZAUfiBf6BzDyQIHn22CUexhaS70TbIT9AOAoOsGXMZz9d+iImKIm+gbzf
pR52LNbBGesHWGjwIuGF4InstIMsKSwGv2DctzhWI+i/m5Goi3rd1V8z/lzUbsf1
0uXqQcSfTyv3ee6YiCWj2W8vcdc5H+B6KzSlGjAR4sRcHTHOQJYO9BgA9evQ3qsJ
Pp00iez13RdheJWPtbfUqQy4gdpu8HFeZx8CAwEAAaOBjzCBjDAOBgNVHQ8BAf8E
BAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRc
XBo+bJILrLcJiGkTWeMPpXb1TDAfBgNVHSMEGDAWgBQqk1Xu65Ww7EBCROw4KLGw
KuToaDAbBgNVHREEFDAShxAAAAAAAAAAAAAAAAAAAAAAMA0GCSqGSIb3DQEBCwUA
A4ICAQAbGsIMhL8clczNmhGl9xZhmyNz6FbLq6g163x9LTgfvwHPt+7urthtd++O
uy4Ut8zFurh/yk7eooPlzf8jO7QUJBAFVy4vj8IcsvpWbFa7cuEOIulbjIzyAm/v
lgy7vUQ6xrWn8x8O9K1ww9z7wugwCyl22BD0wSHZKclJz++AwpL6vUVOD76IIuJO
+S6bE6z26/0ndpundh2AkA++2eIleD6ygnTeTl0PWu6aGoCggBmos50f8KgYHZF/
OZVef203kDls9xCaOiMzaU91VsgLqq/gNcT+2cBd5r3IZTY3C8Rve6EEHS+/4zxf
PKlmiLN7lU9GFZogKecYzY+zPT7OArY7OVFnGTo4qdhdmxnXzHsI+anMCjxLOgEJ
381hyplQGPQOouEupCBxFcwa7oMYoGu20+1nLWYEqFcIXCeyH+s77MyteJSsseqL
xivG5PT+jKJn9hrnFb39bBmht9Vsa+Th6vk953zi5wCSe1j2wXsxFaENDq6BQZOK
f86Kp86M2elYnv3lJ3j2DE2ZTMpw+PA5ThYUnB+HVqYeeB2Y3ErRS8P1FOp1LBE8
+eTz7yXQO5OM2wdYhNNL1zDri/41fHXi9b6337PZVqc39GM+N74x/O4Q7xEBiWgQ
T0dT8SNwf55kv63MeZh63ImxFV0FNRkCteYLcJMle3ohIY4zyQ==
MIIDAjCCAeqgAwIBAgIJAOGCVedOwRbOMA0GCSqGSIb3DQEBBQUAMCExCzAJBgNV
BAYTAlVTMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTkwMjExMTU0NjQ5WhcNMjAw
MjExMTU0NjQ5WjAhMQswCQYDVQQGEwJVUzESMBAGA1UEAwwJbG9jYWxob3N0MIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA60S/fNUWoHm1PYI/yrlnZNtr
dRqDORHe0hPwl/lttLz7+a7HzQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWc
GjxJL24tVwiOwqYRzTPZ/rK3JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4D
h/XgWjEt4DhpHwf/zuIK9XkJw0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0
AdsQCjt1GKcIROkcOGUHqByINJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhq
HRTCt5UELWs/53Gj1ffNuhjECOVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQID
AQABoz0wOzAMBgNVHRMEBTADAQH/MCsGA1UdEQQkMCKCCWxvY2FsaG9zdIIJbG9j
YWxob3N0hwQAAAAAhwR/AAABMA0GCSqGSIb3DQEBBQUAA4IBAQCe2A5gDc3jiZwT
a5TJrc2J2KouqxB/PCddw5VY8jPsZJfsr9gxHi+Xa5g8p3oqmEOIlqM5BVhrZRUG
RWHDmL+bCsuzMoA/vGHtHmUIwLeZQLWgT3kv12Dc8M9flNNjmXWxdMR9lOMwcL83
F0CdElxSmaEbNvCIJBDetJJ7vMCqS2lnTLWurbH4ZGeGwvjzNgpgGCKwbyK/gU+j
UXiTQbVvPQ3WWACDnfH6rg0TpxU9jOBkd+4/9tUrBG7UclQBfGULk3sObLO9kx4N
8RxJmtp8jljIXVPX3udExI05pz039pAgvaeZWtP17QSbYcKF1jFtKo6ckrv2GKXX
M5OXGXdw
-----END CERTIFICATE-----

@@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEA65ZQwJLNgma8pTW+yi4NN7s7Gc9Y7ehXQWNIfy8piiLbOllB
ffl1AM+0sMXb+/8A1knmnMwlHzIwy0RDi1bEq4bMnDVmBOAe0BI9uP8zl8OcM7se
qDgPT14tORkBR+IF/oHMPJAgefbYJR7GFpLvRNshP0A4Cg6wZcxnP136IiYoib6B
vN+lHnYs1sEZ6wdYaPAi4YXgiey0gywpLAa/YNy3OFYj6L+bkaiLet3VXzP+XNRu
x/XS5epBxJ9PK/d57piIJaPZby9x1zkf4HorNKUaMBHixFwdMc5Alg70GAD169De
qwk+nTSJ7PXdF2F4lY+1t9SpDLiB2m7wcV5nHwIDAQABAoIBAQCB2/ilPgaUE8d2
ldqWHa5hgw4/2uCdO04ll/GVUczm/PG1BxAnvYL2MIfcTSRGkrjGZjP9SDZKLONi
mD1XKDv+hK5yiKi0lUnGzddCC0JILKYEieeLOGOQD0yERblEA13kfW20EIomUJ+y
TnVIajQD03pPIDoDqTco1fQvpMDFYw5Q//UhH7VBC261GO1akvhT2Gqdb4aKLaYQ
iDW9IEButL5cRKIJuRxToB/JbmPVEF7xIZtm0sf9dtYVOlBQLeID0uHXgaci0enc
de6GMajmj7NFqc36ypb+Ct18fqEwQBYD+TSQdKs7/lMsAXwRjd5HW4RbYiMZyYnf
Dxgh7QVBAoGBAP9aLLIUcIG7+gk1x7xd+8wRhfo+dhsungeCluSigI9AsfDr6dpR
G9/0lEJH56noZZKQueACTmj7shmRB40xFFLc8w0IDRZCnofsl+Z15k9K84uFPA3W
hdZH9nMieU/mRKdcUYK7pHGqbicHTaJQ5ydZ+xb2E+zYQHOzYpQacHv/AoGBAOwv
TjDZSiassnAPYmmfcHtkUF4gf7PTpiZfH0hXHGAb0mJX4cXAoktAeDeHSi2tz3LW
dAc0ReP8Pdf3uSNv7wkJ1KpNRxAhU5bhnDFmjRc7gMZknVOU+az2M+4yGOn/SOiJ
I6uMHgQDS/VsI+N583n6gbGxVHbQfr9TOc4bLpThAoGBAKin0JmWMnEdzRnEMbZS
hPrWIB2Wn794XNws/qjoQ+1aF60+xGhz5etXyYy1nWd1nZDekkZIf62LgKiuR8ST
xA6u7MGQrcQkID06oWGQQZvhr1ZZm76wEBnl0ftdq66AMpwvt46XjReeL78LbdVl
hidRoSwbQDHQ61EADH4xsFXVAoGBAISXqhXSZsZ/fU1b1avmTod3MYcmR4r07vnr
vOwnu05ZUCrVm3IhSvtkHhlOYl5yjVuy+UByICp1mWJ9N/qlBFTWqAVTjOmJTBwQ
XFd/cwXv6cN3CLu7js+DCHRYu5PiNVQWaWgNKWynTSViqGM0O3PnJphTLU/mjMFs
P69toyEBAoGBALh9YsqxHdYdS5WK9chzDfGlaTQ79jwN+gEzQuP1ooLF0JkMgh5W
//2C6kCrgBsGTm1gfHAjEfC04ZDZLFbKLm56YVKUGL6JJNapm6e5kfiZGjbRKWAg
ViCeRS2qQnVbH74GfHyimeTPDI9cJMiJfDDTPbfosqWSsPEcg2jfsySJ
MIIEogIBAAKCAQEA60S/fNUWoHm1PYI/yrlnZNtrdRqDORHe0hPwl/lttLz7+a7H
zQZFnpiXnuxbDJtpIq/h1vhAl0sFy86Ip26LhbWcGjxJL24tVwiOwqYRzTPZ/rK3
JYuNcIvcztXjMqdzPrHSZy5YZgrQB6yhTiqpBc4Dh/XgWjEt4DhpHwf/zuIK9XkJ
w0IaTWjFmoyKRoWW3q4bHzoKNxS9bXP117Tz7tn0AdsQCjt1GKcIROkcOGUHqByI
NJ2XlBkb7SQPjQVBLDVJKdRDUt+yHkkdbn97UDhqHRTCt5UELWs/53Gj1ffNuhjE
COVjG1HkZweLgZjJRQYe8X2OOLNOyfVY1KsDnQIDAQABAoIBAAb5n8+8pZIWaags
L2X8PzN/Sd1L7u4HOJrz2mM3EuiT3ciWRPgwImpETeJ5UW27Qc+0dTahX5DcuYxE
UErefSZ2ru0cMnNEifWVnF3q/IYf7mudss5bJ9NZYi+Dqdu7mTAXp4xFlHtaALbp
iFK/8wjoBbTHNmKWKK0IHx27Z/sjK+7QnoKij+rRzvhmNyN2r3dT7EO4VePriesr
zyVaGexNPFhtd1HLJLQ5GqRAidtLM4x1ubvp3NLTCvvoQKKYFOg7WqKycZ2VllOg
ApcpZb/kB/sNTacLvum5HgMNWuWwgREISuQJR+esz/5WaSTQ04L2+vMVomGM18X+
9n4KYwECgYEA/Usajzl3tWv1IIairSk9Md7Z2sbaPVBNKv4IDJy3mLwt+2VN2mqo
fpeV5rBaFNWzJR0M0JwLbdlsvSfXgVFkUePg1UiJyFqOKmMO8Bd/nxV9NAewVg1D
KXQLsfrojBfka7HtFmfk/GA2swEMCGzUcY23bwah1JUTLhvbl19GNMECgYEA7chW
Ip/IvYBiaaD/qgklwJE8QoAVzi9zqlI1MOJJNf1r/BTeZ2R8oXlRk8PVxFglliuA
vMgwCkfuqxA8irIdHReLzqcLddPtaHo6R8zKP2cpYBo61C3CPzEAucasaOXQFpjs
DPnp4QFeboNPgiEGLVGHFvD5TwZpideBpWTwud0CgYEAy04MDGfJEQKNJ0VJr4mJ
R80iubqgk1QwDFEILu9fYiWxFrbSTX0Mr0eGlzp3o39/okt17L9DYTGCWTVwgajN
x/kLjsYBaaJdt+H4rHeABTWfYDLHs9pDTTOK65mELGZE/rg6n6BWqMelP/qYKO8J
efeRA3mkTVg2o+zSTea4GEECgYEA3DB4EvgD2/fXKhl8puhxnTDgrHQPvS8T3NTj
jLD/Oo/CP1zT1sqm3qCJelwOyBMYO0dtn2OBmQOjb6VJauYlL5tuS59EbYgigG0v
Ku3pG21cUzH26CS3i+zEz0O6xCiL2WEitaF3gnTSDWRrbAVIww6MGiJru1IkyRBX
beFbScECf1n00W9qrXnqsWefk73ucggfV0gQQmDnauMA9J7B96+MvGprE54Tx9vl
SBodgvJsCod9Y9Q7QsMcXb4CuEgTgWKDBp5cA/KUOQmK5buOrysosLnnm12LaHiF
O7IIh8Cmb9TbdldgW+8ndZ4EQ3lfIS0zN3/7rWD34bs19JDYkRY=
-----END RSA PRIVATE KEY-----

@@ -57,10 +57,12 @@ type DB interface {
//----------------------------------------
// Batch

// Batch Close must be called when the program no longer needs the object.
type Batch interface {
SetDeleter
Write()
WriteSync()
Close()
}

type SetDeleter interface {
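With Close added to the Batch interface (per the doc line above, it must be called once the batch is no longer needed), the defer bat.Close() added to batchWrite is the intended idiom. A hedged usage sketch against the in-memory backend (copyPairs is an illustrative helper, not repo code):

package main

import (
	dbm "github.com/tendermint/tendermint/libs/db"
)

// copyPairs batches several writes and honors the contract that a
// Batch must be closed once it is no longer needed.
func copyPairs(db dbm.DB, pairs map[string][]byte) {
	batch := db.NewBatch()
	defer batch.Close() // release batch resources even on early return

	for k, v := range pairs {
		batch.Set([]byte(k), v)
	}
	batch.Write()
}

func main() {
	db := dbm.NewMemDB()
	copyPairs(db, map[string][]byte{"a": []byte("1"), "b": []byte("2")})
}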
@@ -19,8 +19,7 @@ func TestMain(m *testing.M) {
|
||||
|
||||
code := m.Run()
|
||||
|
||||
node.Stop()
|
||||
node.Wait()
|
||||
rpctest.StopTendermint(node)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
@@ -28,6 +27,7 @@ func TestProvider(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
cfg := rpctest.GetConfig()
|
||||
defer os.RemoveAll(cfg.RootDir)
|
||||
rpcAddr := cfg.RPC.ListenAddress
|
||||
genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
|
||||
if err != nil {
|
||||
|
@@ -54,6 +54,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error {
|
||||
|
||||
dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc)
|
||||
batch := dbp.db.NewBatch()
|
||||
defer batch.Close()
|
||||
|
||||
// Save the fc.validators.
|
||||
// We might be overwriting what we already have, but
|
||||
|
@@ -80,12 +80,8 @@ func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Com
|
||||
vote := makeVote(header, vset, pkz[i])
|
||||
commitSigs[vote.ValidatorIndex] = vote.CommitSig()
|
||||
}
|
||||
|
||||
res := &types.Commit{
|
||||
BlockID: types.BlockID{Hash: header.Hash()},
|
||||
Precommits: commitSigs,
|
||||
}
|
||||
return res
|
||||
blockID := types.BlockID{Hash: header.Hash()}
|
||||
return types.NewCommit(blockID, commitSigs)
|
||||
}
|
||||
|
||||
func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote {
|
||||
|
@@ -32,8 +32,7 @@ func TestMain(m *testing.M) {
|
||||
|
||||
code := m.Run()
|
||||
|
||||
node.Stop()
|
||||
node.Wait()
|
||||
rpctest.StopTendermint(node)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
|
@@ -70,7 +70,7 @@ func TestValidateBlock(t *testing.T) {
|
||||
},
|
||||
signedHeader: types.SignedHeader{
|
||||
Header: &types.Header{Height: 11},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}},
|
||||
Commit: types.NewCommit(types.BlockID{Hash: []byte("0xDEADBEEF")}, nil),
|
||||
},
|
||||
wantErr: "Data hash doesn't match header",
|
||||
},
|
||||
@@ -81,7 +81,7 @@ func TestValidateBlock(t *testing.T) {
|
||||
},
|
||||
signedHeader: types.SignedHeader{
|
||||
Header: &types.Header{Height: 11},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
|
||||
Commit: types.NewCommit(types.BlockID{Hash: []byte("DEADBEEF")}, nil),
|
||||
},
|
||||
},
|
||||
// End Header.Data hash mismatch test
|
||||
@@ -169,7 +169,7 @@ func TestValidateBlockMeta(t *testing.T) {
|
||||
ValidatorsHash: []byte("Tendermint"),
|
||||
Time: testTime2,
|
||||
},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
|
||||
Commit: types.NewCommit(types.BlockID{Hash: []byte("DEADBEEF")}, nil),
|
||||
},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
@@ -188,7 +188,7 @@ func TestValidateBlockMeta(t *testing.T) {
|
||||
ValidatorsHash: []byte("Tendermint-x"),
|
||||
Time: testTime2,
|
||||
},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
|
||||
Commit: types.NewCommit(types.BlockID{Hash: []byte("DEADBEEF")}, nil),
|
||||
},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
|
@@ -11,7 +11,8 @@ import (
|
||||
func BenchmarkReap(b *testing.B) {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
size := 10000
|
||||
for i := 0; i < size; i++ {
|
||||
|
@@ -1,8 +1,8 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@@ -25,7 +25,11 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
|
||||
// A cleanupFunc cleans up any config / test files created for a particular
|
||||
// test.
|
||||
type cleanupFunc func()
|
||||
|
||||
func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, cleanupFunc) {
|
||||
config := cfg.ResetTestRoot("mempool_test")
|
||||
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
@@ -36,7 +40,7 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
|
||||
}
|
||||
mempool := NewMempool(config.Mempool, appConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger())
|
||||
return mempool
|
||||
return mempool, func() { os.RemoveAll(config.RootDir) }
|
||||
}
|
||||
|
||||
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
|
||||
@@ -82,7 +86,8 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
|
||||
func TestReapMaxBytesMaxGas(t *testing.T) {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
// Ensure gas calculation behaves as expected
|
||||
checkTxs(t, mempool, 1)
|
||||
@@ -130,7 +135,8 @@ func TestReapMaxBytesMaxGas(t *testing.T) {
|
||||
func TestMempoolFilters(t *testing.T) {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
emptyTxArr := []types.Tx{[]byte{}}
|
||||
|
||||
nopPreFilter := func(tx types.Tx) error { return nil }
|
||||
@@ -168,7 +174,8 @@ func TestMempoolFilters(t *testing.T) {
|
||||
func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
|
||||
err := mempool.CheckTx([]byte{0x01}, nil)
|
||||
if assert.Error(t, err) {
|
||||
@@ -179,7 +186,8 @@ func TestMempoolUpdateAddsTxsToCache(t *testing.T) {
|
||||
func TestTxsAvailable(t *testing.T) {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
mempool.EnableTxsAvailable()
|
||||
|
||||
timeoutMS := 500
|
||||
@@ -224,7 +232,9 @@ func TestSerialReap(t *testing.T) {
|
||||
app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
appConnCon, _ := cc.NewABCIClient()
|
||||
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
|
||||
err := appConnCon.Start()
|
||||
@@ -364,6 +374,7 @@ func TestMempoolCloseWAL(t *testing.T) {
|
||||
// 3. Create the mempool
|
||||
wcfg := cfg.DefaultMempoolConfig()
|
||||
wcfg.RootDir = rootDir
|
||||
defer os.RemoveAll(wcfg.RootDir)
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
@@ -406,7 +417,8 @@ func txMessageSize(tx types.Tx) int {
|
||||
func TestMempoolMaxMsgSize(t *testing.T) {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempl := newMempoolWithApp(cc)
|
||||
mempl, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
testCases := []struct {
|
||||
len int
|
||||
@@ -451,7 +463,7 @@ func TestMempoolMaxMsgSize(t *testing.T) {
|
||||
}
|
||||
|
||||
func checksumIt(data []byte) string {
|
||||
h := md5.New()
|
||||
h := sha256.New()
|
||||
h.Write(data)
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
@@ -49,7 +49,8 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
|
||||
for i := 0; i < N; i++ {
|
||||
app := kvstore.NewKVStoreApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we dont start the consensus states
|
||||
reactors[i].SetLogger(logger.With("validator", i))
|
||||
|
10
node/node.go
10
node/node.go
@@ -345,8 +345,7 @@ func NewNode(config *cfg.Config,
|
||||
return nil, err
|
||||
}
|
||||
evidenceLogger := logger.With("module", "evidence")
|
||||
evidenceStore := evidence.NewEvidenceStore(evidenceDB)
|
||||
evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
|
||||
evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
|
||||
evidencePool.SetLogger(evidenceLogger)
|
||||
evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
|
||||
evidenceReactor.SetLogger(evidenceLogger)
|
||||
@@ -489,7 +488,7 @@ func NewNode(config *cfg.Config,
 	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)

 	// Add ourselves to addrbook to prevent dialing ourselves
-	addrBook.AddOurAddress(nodeInfo.NetAddress())
+	addrBook.AddOurAddress(sw.NetAddress())

 	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
 	if config.P2P.PexReactor {
@@ -794,6 +793,11 @@ func (n *Node) ProxyApp() proxy.AppConns {
 	return n.proxyApp
 }

+// Config returns the Node's config.
+func (n *Node) Config() *cfg.Config {
+	return n.config
+}
+
 //------------------------------------------------------------------------------

 func (n *Node) Listeners() []string {
node/node_test.go
@@ -31,6 +31,7 @@ import (

 func TestNodeStartStop(t *testing.T) {
 	config := cfg.ResetTestRoot("node_node_test")
+	defer os.RemoveAll(config.RootDir)

 	// create & start node
 	n, err := DefaultNewNode(config, log.TestingLogger())
@@ -90,6 +91,7 @@ func TestSplitAndTrimEmpty(t *testing.T) {

 func TestNodeDelayedStart(t *testing.T) {
 	config := cfg.ResetTestRoot("node_delayed_start_test")
+	defer os.RemoveAll(config.RootDir)
 	now := tmtime.Now()

 	// create & start node
@@ -104,6 +106,7 @@ func TestNodeDelayedStart(t *testing.T) {

 func TestNodeSetAppVersion(t *testing.T) {
 	config := cfg.ResetTestRoot("node_app_version_test")
+	defer os.RemoveAll(config.RootDir)

 	// create & start node
 	n, err := DefaultNewNode(config, log.TestingLogger())
@@ -124,6 +127,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
 	addr := "tcp://" + testFreeAddr(t)

 	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(config.RootDir)
 	config.BaseConfig.PrivValidatorListenAddr = addr

 	dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
@@ -153,6 +157,7 @@ func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
 	addrNoPrefix := testFreeAddr(t)

 	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(config.RootDir)
 	config.BaseConfig.PrivValidatorListenAddr = addrNoPrefix

 	_, err := DefaultNewNode(config, log.TestingLogger())
@@ -164,6 +169,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
 	defer os.Remove(tmpfile) // clean up

 	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(config.RootDir)
 	config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile

 	dialer := privval.DialUnixFn(tmpfile)
@@ -200,6 +206,7 @@ func testFreeAddr(t *testing.T) string {
 // mempool and evidence pool and validate it.
 func TestCreateProposalBlock(t *testing.T) {
 	config := cfg.ResetTestRoot("node_create_proposal")
+	defer os.RemoveAll(config.RootDir)
 	cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication())
 	proxyApp := proxy.NewAppConns(cc)
 	err := proxyApp.Start()
@@ -227,11 +234,10 @@ func TestCreateProposalBlock(t *testing.T) {
 	mempool.SetLogger(logger)

 	// Make EvidencePool
-	types.RegisterMockEvidencesGlobal()
+	types.RegisterMockEvidencesGlobal() // XXX!
 	evidence.RegisterMockEvidences()
 	evidenceDB := dbm.NewMemDB()
-	evidenceStore := evidence.NewEvidenceStore(evidenceDB)
-	evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
+	evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
 	evidencePool.SetLogger(logger)

 	// fill the evidence pool with more evidence
@@ -261,7 +267,7 @@ func TestCreateProposalBlock(t *testing.T) {
 		evidencePool,
 	)

-	commit := &types.Commit{}
+	commit := types.NewCommit(types.BlockID{}, nil)
 	block, _ := blockExec.CreateProposalBlock(
 		height,
 		state, commit,
@@ -270,7 +276,6 @@ func TestCreateProposalBlock(t *testing.T) {

 	err = blockExec.ValidateBlock(state, block)
 	assert.NoError(t, err)
-
 }

 func state(nVals int, height int64) (sm.State, dbm.DB) {
p2p/conn/secret_connection_test.go
@@ -238,6 +238,10 @@ func TestSecretConnectionReadWrite(t *testing.T) {
 			for {
 				n, err := nodeSecretConn.Read(readBuffer)
 				if err == io.EOF {
+					if err := nodeConn.PipeReader.Close(); err != nil {
+						t.Error(err)
+						return nil, err, true
+					}
 					return nil, nil, false
 				} else if err != nil {
 					t.Errorf("Failed to read from nodeSecretConn: %v", err)
@@ -245,11 +249,6 @@ func TestSecretConnectionReadWrite(t *testing.T) {
 				}
 				*nodeReads = append(*nodeReads, string(readBuffer[:n]))
 			}
-			if err := nodeConn.PipeReader.Close(); err != nil {
-				t.Error(err)
-				return nil, err, true
-			}
-			return nil, nil, false
 		},
 	)
 	assert.True(t, ok, "Unexpected task abortion")
p2p/dummy/peer.go (deleted)
@@ -1,100 +0,0 @@
-package dummy
-
-import (
-	"net"
-
-	cmn "github.com/tendermint/tendermint/libs/common"
-	p2p "github.com/tendermint/tendermint/p2p"
-	tmconn "github.com/tendermint/tendermint/p2p/conn"
-)
-
-type peer struct {
-	cmn.BaseService
-	kv map[string]interface{}
-}
-
-var _ p2p.Peer = (*peer)(nil)
-
-// NewPeer creates new dummy peer.
-func NewPeer() *peer {
-	p := &peer{
-		kv: make(map[string]interface{}),
-	}
-	p.BaseService = *cmn.NewBaseService(nil, "peer", p)
-
-	return p
-}
-
-// FlushStop just calls Stop.
-func (p *peer) FlushStop() {
-	p.Stop()
-}
-
-// ID always returns dummy.
-func (p *peer) ID() p2p.ID {
-	return p2p.ID("dummy")
-}
-
-// IsOutbound always returns false.
-func (p *peer) IsOutbound() bool {
-	return false
-}
-
-// IsPersistent always returns false.
-func (p *peer) IsPersistent() bool {
-	return false
-}
-
-// NodeInfo always returns empty node info.
-func (p *peer) NodeInfo() p2p.NodeInfo {
-	return p2p.DefaultNodeInfo{}
-}
-
-// RemoteIP always returns localhost.
-func (p *peer) RemoteIP() net.IP {
-	return net.ParseIP("127.0.0.1")
-}
-
-// Addr always returns tcp://localhost:8800.
-func (p *peer) RemoteAddr() net.Addr {
-	return &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8800}
-}
-
-// CloseConn always returns nil.
-func (p *peer) CloseConn() error {
-	return nil
-}
-
-// Status always returns empty connection status.
-func (p *peer) Status() tmconn.ConnectionStatus {
-	return tmconn.ConnectionStatus{}
-}
-
-// Send does not do anything and just returns true.
-func (p *peer) Send(byte, []byte) bool {
-	return true
-}
-
-// TrySend does not do anything and just returns true.
-func (p *peer) TrySend(byte, []byte) bool {
-	return true
-}
-
-// Set records value under key specified in the map.
-func (p *peer) Set(key string, value interface{}) {
-	p.kv[key] = value
-}
-
-// Get returns a value associated with the key. Nil is returned if no value
-// found.
-func (p *peer) Get(key string) interface{} {
-	if value, ok := p.kv[key]; ok {
-		return value
-	}
-	return nil
-}
-
-// OriginalAddr always returns nil.
-func (p *peer) OriginalAddr() *p2p.NetAddress {
-	return nil
-}
p2p/mock/peer.go (new file)
@@ -0,0 +1,68 @@
+package mock
+
+import (
+	"net"
+
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/conn"
+)
+
+type Peer struct {
+	*cmn.BaseService
+	ip                   net.IP
+	id                   p2p.ID
+	addr                 *p2p.NetAddress
+	kv                   map[string]interface{}
+	Outbound, Persistent bool
+}
+
+// NewPeer creates and starts a new mock peer. If the ip
+// is nil, random routable address is used.
+func NewPeer(ip net.IP) *Peer {
+	var netAddr *p2p.NetAddress
+	if ip == nil {
+		_, netAddr = p2p.CreateRoutableAddr()
+	} else {
+		netAddr = p2p.NewNetAddressIPPort(ip, 26656)
+	}
+	nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
+	netAddr.ID = nodeKey.ID()
+	mp := &Peer{
+		ip:   ip,
+		id:   nodeKey.ID(),
+		addr: netAddr,
+		kv:   make(map[string]interface{}),
+	}
+	mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp)
+	mp.Start()
+	return mp
+}
+
+func (mp *Peer) FlushStop()                              { mp.Stop() }
+func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
+func (mp *Peer) Send(chID byte, msgBytes []byte) bool    { return true }
+func (mp *Peer) NodeInfo() p2p.NodeInfo {
+	return p2p.DefaultNodeInfo{
+		ID_:        mp.addr.ID,
+		ListenAddr: mp.addr.DialString(),
+	}
+}
+func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
+func (mp *Peer) ID() p2p.ID                    { return mp.id }
+func (mp *Peer) IsOutbound() bool              { return mp.Outbound }
+func (mp *Peer) IsPersistent() bool            { return mp.Persistent }
+func (mp *Peer) Get(key string) interface{} {
+	if value, ok := mp.kv[key]; ok {
+		return value
+	}
+	return nil
+}
+func (mp *Peer) Set(key string, value interface{}) {
+	mp.kv[key] = value
+}
+func (mp *Peer) RemoteIP() net.IP            { return mp.ip }
+func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr }
+func (mp *Peer) RemoteAddr() net.Addr        { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
+func (mp *Peer) CloseConn() error            { return nil }
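The new `p2p/mock` package replaces the deleted `p2p/dummy` peer in tests. A short usage sketch (the assertions are illustrative, not taken from the diff):

```go
package mock_test

import (
	"net"
	"testing"

	"github.com/tendermint/tendermint/p2p/mock"
)

func TestMockPeer(t *testing.T) {
	// nil IP: the mock generates a random routable address for itself.
	p1 := mock.NewPeer(nil)
	defer p1.Stop()

	// Fixed IP: handy when a test asserts on RemoteIP().
	p2 := mock.NewPeer(net.ParseIP("127.0.0.1"))
	defer p2.Stop()

	if p1.ID() == p2.ID() {
		t.Fatal("each mock peer derives its ID from a fresh node key")
	}
	if !p2.RemoteIP().Equal(net.ParseIP("127.0.0.1")) {
		t.Fatal("RemoteIP should echo the IP passed to NewPeer")
	}
}
```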
p2p/node_info.go
@@ -23,14 +23,8 @@ func MaxNodeInfoSize() int {
 // NodeInfo exposes basic info of a node
 // and determines if we're compatible.
 type NodeInfo interface {
-	nodeInfoAddress
-	nodeInfoTransport
-}
-
-// nodeInfoAddress exposes just the core info of a node.
-type nodeInfoAddress interface {
 	ID() ID
 	NetAddress() *NetAddress
 	nodeInfoTransport
 }

 // nodeInfoTransport validates a nodeInfo and checks
@@ -221,7 +215,7 @@ func (info DefaultNodeInfo) NetAddress() *NetAddress {
 	if err != nil {
 		switch err.(type) {
 		case ErrNetAddressLookup:
 			// XXX If the peer provided a host name and the lookup fails here
 			// we're out of luck.
 			// TODO: use a NetAddress in DefaultNodeInfo
 		default:
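Reading the interface hunk above: the address methods previously reachable through the embedded `nodeInfoAddress` interface are, on this reconstruction of the hunk, listed directly on `NodeInfo`. Method-set-wise the two spellings are equivalent in Go, as this standalone sketch shows (simplified types, not the real p2p ones):

```go
package main

import "fmt"

type validator interface{ Validate() error }

type addressed interface {
	ID() string
	NetAddress() string
}

// Embedded spelling: the method set comes from smaller interfaces.
type nodeInfoEmbedded interface {
	addressed
	validator
}

// Flattened spelling: same method set, declared inline.
type nodeInfoFlat interface {
	ID() string
	NetAddress() string
	validator
}

type info struct{ id, addr string }

func (i info) ID() string         { return i.id }
func (i info) NetAddress() string { return i.addr }
func (i info) Validate() error    { return nil }

func main() {
	var a nodeInfoEmbedded = info{"deadbeef", "127.0.0.1:26656"}
	var b nodeInfoFlat = a // identical method sets: assignable both ways
	fmt.Println(b.ID(), b.NetAddress())
}
```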
p2p/peer.go
@@ -29,7 +29,7 @@ type Peer interface {

 	NodeInfo() NodeInfo // peer's info
 	Status() tmconn.ConnectionStatus
-	OriginalAddr() *NetAddress // original address for outbound peers
+	SocketAddr() *NetAddress // actual address of the socket

 	Send(byte, []byte) bool
 	TrySend(byte, []byte) bool
@@ -46,7 +46,7 @@ type peerConn struct {
 	persistent bool
 	conn       net.Conn // source connection

-	originalAddr *NetAddress // nil for inbound connections
+	socketAddr *NetAddress

 	// cached RemoteIP()
 	ip net.IP
@@ -55,14 +55,14 @@ type peerConn struct {
 func newPeerConn(
 	outbound, persistent bool,
 	conn net.Conn,
-	originalAddr *NetAddress,
+	socketAddr *NetAddress,
 ) peerConn {

 	return peerConn{
-		outbound:     outbound,
-		persistent:   persistent,
-		conn:         conn,
-		originalAddr: originalAddr,
+		outbound:   outbound,
+		persistent: persistent,
+		conn:       conn,
+		socketAddr: socketAddr,
 	}
 }

@@ -223,13 +223,12 @@ func (p *peer) NodeInfo() NodeInfo {
 	return p.nodeInfo
 }

-// OriginalAddr returns the original address, which was used to connect with
-// the peer. Returns nil for inbound peers.
-func (p *peer) OriginalAddr() *NetAddress {
-	if p.peerConn.outbound {
-		return p.peerConn.originalAddr
-	}
-	return nil
+// SocketAddr returns the address of the socket.
+// For outbound peers, it's the address dialed (after DNS resolution).
+// For inbound peers, it's the address returned by the underlying connection
+// (not what's reported in the peer's NodeInfo).
+func (p *peer) SocketAddr() *NetAddress {
+	return p.peerConn.socketAddr
 }

 // Status returns the peer's ConnectionStatus.
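The contract change is the important part of this hunk: `OriginalAddr` returned nil for inbound peers, so callers carried a fallback to the peer's self-reported (unauthenticated) address, while `SocketAddr` is always populated from the socket itself. A standalone sketch of the collapsed call site, using simplified stand-ins for the p2p types:

```go
package main

import "fmt"

// Simplified stand-ins for p2p.NetAddress / p2p.Peer.
type NetAddress struct{ Addr string }
type Peer interface{ SocketAddr() *NetAddress }

type inboundPeer struct{ conn string }

func (p inboundPeer) SocketAddr() *NetAddress { return &NetAddress{Addr: p.conn} }

func main() {
	// Before, callers needed: addr := p.OriginalAddr(); if addr == nil { fall back }.
	// After, SocketAddr is non-nil for inbound and outbound peers alike.
	var p Peer = inboundPeer{conn: "192.0.2.7:26656"}
	fmt.Println(p.SocketAddr().Addr)
}
```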
@@ -29,7 +29,7 @@ func (mp *mockPeer) IsPersistent() bool { return true }
 func (mp *mockPeer) Get(s string) interface{}  { return s }
 func (mp *mockPeer) Set(string, interface{})   {}
 func (mp *mockPeer) RemoteIP() net.IP          { return mp.ip }
-func (mp *mockPeer) OriginalAddr() *NetAddress { return nil }
+func (mp *mockPeer) SocketAddr() *NetAddress   { return nil }
 func (mp *mockPeer) RemoteAddr() net.Addr      { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
 func (mp *mockPeer) CloseConn() error          { return nil }
@@ -109,25 +109,27 @@ func testOutboundPeerConn(
 	persistent bool,
 	ourNodePrivKey crypto.PrivKey,
 ) (peerConn, error) {
+	var pc peerConn
 	conn, err := testDial(addr, config)
 	if err != nil {
-		return peerConn{}, cmn.ErrorWrap(err, "Error creating peer")
+		return pc, cmn.ErrorWrap(err, "Error creating peer")
 	}

-	pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr)
+	pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr)
 	if err != nil {
 		if cerr := conn.Close(); cerr != nil {
-			return peerConn{}, cmn.ErrorWrap(err, cerr.Error())
+			return pc, cmn.ErrorWrap(err, cerr.Error())
 		}
-		return peerConn{}, err
+		return pc, err
 	}

 	// ensure dialed ID matches connection ID
 	if addr.ID != pc.ID() {
 		if cerr := conn.Close(); cerr != nil {
-			return peerConn{}, cmn.ErrorWrap(err, cerr.Error())
+			return pc, cmn.ErrorWrap(err, cerr.Error())
 		}
-		return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()}
+		return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()}
 	}

 	return pc, nil
p2p/pex/pex_reactor.go
@@ -167,7 +167,7 @@ func (r *PEXReactor) AddPeer(p Peer) {
 		}
 	} else {
 		// inbound peer is its own source
-		addr := p.NodeInfo().NetAddress()
+		addr := p.SocketAddr()
 		src := addr

 		// add to book. dont RequestAddrs right away because
@@ -309,7 +309,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
 	}
 	r.requestsSent.Delete(id)

-	srcAddr := src.NodeInfo().NetAddress()
+	srcAddr := src.SocketAddr()
 	for _, netAddr := range addrs {
 		// Validate netAddr. Disconnect from a peer if it sends us invalid data.
 		if netAddr == nil {
p2p/pex/pex_reactor_test.go
@@ -2,8 +2,8 @@ package pex

 import (
 	"fmt"

 	"io/ioutil"
 	"net"
 	"os"
 	"path/filepath"
 	"testing"
@@ -12,14 +12,11 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

-	"github.com/tendermint/tendermint/crypto"
-	"github.com/tendermint/tendermint/crypto/ed25519"
-	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/p2p/mock"

 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
-	"github.com/tendermint/tendermint/p2p/conn"
 )

 var (
@@ -101,7 +98,7 @@ func TestPEXReactorRunning(t *testing.T) {
 	}

 	addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) {
-		addr := switches[otherSwitchIndex].NodeInfo().NetAddress()
+		addr := switches[otherSwitchIndex].NetAddress()
 		books[switchIndex].AddAddress(addr, addr)
 	}

@@ -132,7 +129,7 @@ func TestPEXReactorReceive(t *testing.T) {
 	r.RequestAddrs(peer)

 	size := book.Size()
-	addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
+	addrs := []*p2p.NetAddress{peer.SocketAddr()}
 	msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs})
 	r.Receive(PexChannel, peer, msg)
 	assert.Equal(t, size+1, book.Size())
@@ -148,7 +145,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
 	sw := createSwitchAndAddReactors(r)
 	sw.SetAddrBook(book)

-	peer := newMockPeer()
+	peer := mock.NewPeer(nil)
 	p2p.AddPeerToSwitch(sw, peer)
 	assert.True(t, sw.Peers().Has(peer.ID()))

@@ -178,7 +175,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
 	sw := createSwitchAndAddReactors(r)
 	sw.SetAddrBook(book)

-	peer := newMockPeer()
+	peer := mock.NewPeer(nil)
 	p2p.AddPeerToSwitch(sw, peer)
 	assert.True(t, sw.Peers().Has(peer.ID()))

@@ -189,7 +186,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
 	assert.True(t, r.requestsSent.Has(id))
 	assert.True(t, sw.Peers().Has(peer.ID()))

-	addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
+	addrs := []*p2p.NetAddress{peer.SocketAddr()}
 	msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs})

 	// receive some addrs. should clear the request
@@ -234,7 +231,7 @@ func TestCheckSeeds(t *testing.T) {
 	badPeerConfig = &PEXReactorConfig{
 		Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
 			"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657",
-			seed.NodeInfo().NetAddress().String()},
+			seed.NetAddress().String()},
 	}
 	peer = testCreatePeerWithConfig(dir, 2, badPeerConfig)
 	require.Nil(t, peer.Start())
@@ -268,12 +265,13 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
 	defer os.RemoveAll(dir) // nolint: errcheck

 	// 1. create peer
-	peer := testCreateDefaultPeer(dir, 1)
-	require.Nil(t, peer.Start())
-	defer peer.Stop()
+	peerSwitch := testCreateDefaultPeer(dir, 1)
+	require.Nil(t, peerSwitch.Start())
+	defer peerSwitch.Stop()

 	// 2. Create seed which knows about the peer
-	seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peer.NodeInfo().NetAddress()}, []*p2p.NetAddress{peer.NodeInfo().NetAddress()})
+	peerAddr := peerSwitch.NetAddress()
+	seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr})
 	require.Nil(t, seed.Start())
 	defer seed.Stop()

@@ -300,7 +298,7 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
 	// Create a peer, add it to the peer set and the addrbook.
 	peer := p2p.CreateRandomPeer(false)
 	p2p.AddPeerToSwitch(pexR.Switch, peer)
-	addr1 := peer.NodeInfo().NetAddress()
+	addr1 := peer.SocketAddr()
 	pexR.book.AddAddress(addr1, addr1)

 	// Add a non-connected address to the book.
@@ -364,7 +362,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) {
 	reactor := switches[0].Reactors()["pex"].(*PEXReactor)
 	peerID := switches[1].NodeInfo().ID()

-	err = switches[1].DialPeerWithAddress(switches[0].NodeInfo().NetAddress(), false)
+	err = switches[1].DialPeerWithAddress(switches[0].NetAddress(), false)
 	assert.NoError(t, err)

 	// sleep up to a second while waiting for the peer to send us a message.
@@ -402,7 +400,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
 	pexR.RequestAddrs(peer)

 	size := book.Size()
-	addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
+	addrs := []*p2p.NetAddress{peer.SocketAddr()}
 	msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs})
 	pexR.Receive(PexChannel, peer, msg)
 	assert.Equal(t, size, book.Size())
@@ -418,8 +416,8 @@ func TestPEXReactorDialPeer(t *testing.T) {
 	sw := createSwitchAndAddReactors(pexR)
 	sw.SetAddrBook(book)

-	peer := newMockPeer()
-	addr := peer.NodeInfo().NetAddress()
+	peer := mock.NewPeer(nil)
+	addr := peer.SocketAddr()

 	assert.Equal(t, 0, pexR.AttemptsToDial(addr))

@@ -444,44 +442,6 @@ func TestPEXReactorDialPeer(t *testing.T) {
 	}
 }

-type mockPeer struct {
-	*cmn.BaseService
-	pubKey               crypto.PubKey
-	addr                 *p2p.NetAddress
-	outbound, persistent bool
-}
-
-func newMockPeer() mockPeer {
-	_, netAddr := p2p.CreateRoutableAddr()
-	mp := mockPeer{
-		addr:   netAddr,
-		pubKey: ed25519.GenPrivKey().PubKey(),
-	}
-	mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp)
-	mp.Start()
-	return mp
-}
-
-func (mp mockPeer) FlushStop()         { mp.Stop() }
-func (mp mockPeer) ID() p2p.ID         { return mp.addr.ID }
-func (mp mockPeer) IsOutbound() bool   { return mp.outbound }
-func (mp mockPeer) IsPersistent() bool { return mp.persistent }
-func (mp mockPeer) NodeInfo() p2p.NodeInfo {
-	return p2p.DefaultNodeInfo{
-		ID_:        mp.addr.ID,
-		ListenAddr: mp.addr.DialString(),
-	}
-}
-func (mockPeer) RemoteIP() net.IP              { return net.ParseIP("127.0.0.1") }
-func (mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
-func (mockPeer) Send(byte, []byte) bool        { return false }
-func (mockPeer) TrySend(byte, []byte) bool     { return false }
-func (mockPeer) Set(string, interface{})       {}
-func (mockPeer) Get(string) interface{}        { return nil }
-func (mockPeer) OriginalAddr() *p2p.NetAddress { return nil }
-func (mockPeer) RemoteAddr() net.Addr          { return &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8800} }
-func (mockPeer) CloseConn() error              { return nil }
-
 func assertPeersWithTimeout(
 	t *testing.T,
 	switches []*p2p.Switch,
@@ -590,7 +550,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress)
 // Starting and stopping the peer is left to the caller
 func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch {
 	conf := &PEXReactorConfig{
-		Seeds: []string{seed.NodeInfo().NetAddress().String()},
+		Seeds: []string{seed.NetAddress().String()},
 	}
 	return testCreatePeerWithConfig(dir, id, conf)
 }
p2p/switch.go
@@ -86,6 +86,12 @@ type Switch struct {
 	metrics *Metrics
 }

+// NetAddress returns the address the switch is listening on.
+func (sw *Switch) NetAddress() *NetAddress {
+	addr := sw.transport.NetAddress()
+	return &addr
+}
+
 // SwitchOption sets an optional parameter on the Switch.
 type SwitchOption func(*Switch)
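Exposing `NetAddress()` on the switch gives callers the transport's actual listen address instead of the self-reported `NodeInfo` one, which matters most when listening on port 0. A simplified, standalone sketch of why the live listener is the source of truth:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Ask the OS for any free port, as the p2p tests do with "127.0.0.1:0".
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// The configured address says port 0; only the live listener knows the
	// real port. That is why Switch.NetAddress delegates to the transport
	// rather than echoing configuration.
	fmt.Println("configured:", "127.0.0.1:0")
	fmt.Println("actual:    ", ln.Addr().String())
}
```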
@@ -284,13 +290,7 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
 	sw.stopAndRemovePeer(peer, reason)

 	if peer.IsPersistent() {
-		addr := peer.OriginalAddr()
-		if addr == nil {
-			// FIXME: persistent peers can't be inbound right now.
-			// self-reported address for inbound persistent peers
-			addr = peer.NodeInfo().NetAddress()
-		}
-		go sw.reconnectToPeer(addr)
+		go sw.reconnectToPeer(peer.SocketAddr())
 	}
 }

@@ -378,7 +378,7 @@ func (sw *Switch) SetAddrBook(addrBook AddrBook) {
 // like contributed to consensus.
 func (sw *Switch) MarkPeerAsGood(peer Peer) {
 	if sw.addrBook != nil {
-		sw.addrBook.MarkGood(peer.NodeInfo().NetAddress())
+		sw.addrBook.MarkGood(peer.SocketAddr())
 	}
 }

@@ -395,7 +395,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
 		sw.Logger.Error("Error in peer's address", "err", err)
 	}

-	ourAddr := sw.nodeInfo.NetAddress()
+	ourAddr := sw.NetAddress()

 	// TODO: this code feels like it's in the wrong place.
 	// The integration tests depend on the addrBook being saved
@@ -524,7 +524,7 @@ func (sw *Switch) acceptRoutine() {
 			if in >= sw.config.MaxNumInboundPeers {
 				sw.Logger.Info(
 					"Ignoring inbound connection: already have enough inbound peers",
-					"address", p.NodeInfo().NetAddress().String(),
+					"address", p.SocketAddr(),
 					"have", in,
 					"max", sw.config.MaxNumInboundPeers,
 				)
@@ -641,7 +641,7 @@ func (sw *Switch) addPeer(p Peer) error {
 		return err
 	}

-	p.SetLogger(sw.Logger.With("peer", p.NodeInfo().NetAddress()))
+	p.SetLogger(sw.Logger.With("peer", p.SocketAddr()))

 	// Handle the shut down case where the switch has stopped but we're
 	// concurrently trying to add a peer.
p2p/switch_test.go
@@ -160,10 +160,6 @@ func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, r

 func TestSwitchFiltersOutItself(t *testing.T) {
 	s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
-	// addr := s1.NodeInfo().NetAddress()
-
-	// // add ourselves like we do in node.go#427
-	// s1.addrBook.AddOurAddress(addr)

 	// simulate s1 having a public IP by creating a remote peer with the same ID
 	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
@@ -495,7 +491,7 @@ func TestSwitchAcceptRoutine(t *testing.T) {
 		rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
 		remotePeers = append(remotePeers, rp)
 		rp.Start()
-		c, err := rp.Dial(sw.NodeInfo().NetAddress())
+		c, err := rp.Dial(sw.NetAddress())
 		require.NoError(t, err)
 		// spawn a reading routine to prevent connection from closing
 		go func(c net.Conn) {
@@ -514,7 +510,7 @@ func TestSwitchAcceptRoutine(t *testing.T) {
 	// 2. check we close new connections if we already have MaxNumInboundPeers peers
 	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
 	rp.Start()
-	conn, err := rp.Dial(sw.NodeInfo().NetAddress())
+	conn, err := rp.Dial(sw.NetAddress())
 	require.NoError(t, err)
 	// check conn is closed
 	one := make([]byte, 1)
p2p/test_util.go
@@ -35,7 +35,8 @@ func CreateRandomPeer(outbound bool) *peer {
 	addr, netAddr := CreateRoutableAddr()
 	p := &peer{
 		peerConn: peerConn{
-			outbound: outbound,
+			outbound:   outbound,
+			socketAddr: netAddr,
 		},
 		nodeInfo: mockNodeInfo{netAddr},
 		mconn:    &conn.MConnection{},
@@ -174,10 +175,15 @@ func MakeSwitch(
 		PrivKey: ed25519.GenPrivKey(),
 	}
 	nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i))
+	addr, err := NewNetAddressString(
+		IDAddressString(nodeKey.ID(), nodeInfo.(DefaultNodeInfo).ListenAddr),
+	)
+	if err != nil {
+		panic(err)
+	}

 	t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg))

-	addr := nodeInfo.NetAddress()
 	if err := t.Listen(*addr); err != nil {
 		panic(err)
 	}
@@ -214,7 +220,7 @@ func testPeerConn(
 	cfg *config.P2PConfig,
 	outbound, persistent bool,
 	ourNodePrivKey crypto.PrivKey,
-	originalAddr *NetAddress,
+	socketAddr *NetAddress,
 ) (pc peerConn, err error) {
 	conn := rawConn

@@ -231,12 +237,7 @@ func testPeerConn(
 	}

 	// Only the information we already have
-	return peerConn{
-		outbound:     outbound,
-		persistent:   persistent,
-		conn:         conn,
-		originalAddr: originalAddr,
-	}, nil
+	return newPeerConn(outbound, persistent, conn, socketAddr), nil
 }

 //----------------------------------------------------------------
p2p/transport.go
@@ -24,6 +24,7 @@ type IPResolver interface {
 // accept is the container to carry the upgraded connection and NodeInfo from an
 // asynchronously running routine to the Accept method.
 type accept struct {
+	netAddr  *NetAddress
 	conn     net.Conn
 	nodeInfo NodeInfo
 	err      error
@@ -47,6 +48,9 @@ type peerConfig struct {
 // the transport. Each transport is also responsible to filter establishing
 // peers specific to its domain.
 type Transport interface {
+	// Listening address.
+	NetAddress() NetAddress
+
 	// Accept returns a newly connected Peer.
 	Accept(peerConfig) (Peer, error)

@@ -115,6 +119,7 @@ func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption {
 // MultiplexTransport accepts and dials tcp connections and upgrades them to
 // multiplexed peers.
 type MultiplexTransport struct {
+	netAddr  NetAddress
 	listener net.Listener

 	acceptc chan accept
@@ -161,6 +166,11 @@ func NewMultiplexTransport(
 	}
 }

+// NetAddress implements Transport.
+func (mt *MultiplexTransport) NetAddress() NetAddress {
+	return mt.netAddr
+}
+
 // Accept implements Transport.
 func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) {
 	select {
@@ -173,7 +183,7 @@ func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) {

 		cfg.outbound = false

-		return mt.wrapPeer(a.conn, a.nodeInfo, cfg, nil), nil
+		return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil
 	case <-mt.closec:
 		return nil, &ErrTransportClosed{}
 	}
@@ -194,7 +204,7 @@ func (mt *MultiplexTransport) Dial(
 		return nil, err
 	}

-	secretConn, nodeInfo, err := mt.upgrade(c)
+	secretConn, nodeInfo, err := mt.upgrade(c, &addr)
 	if err != nil {
 		return nil, err
 	}
@@ -224,6 +234,7 @@ func (mt *MultiplexTransport) Listen(addr NetAddress) error {
 		return err
 	}

+	mt.netAddr = addr
 	mt.listener = ln

 	go mt.acceptPeers()
@@ -258,15 +269,21 @@ func (mt *MultiplexTransport) acceptPeers() {
 			var (
 				nodeInfo   NodeInfo
 				secretConn *conn.SecretConnection
+				netAddr    *NetAddress
 			)

 			err := mt.filterConn(c)
 			if err == nil {
-				secretConn, nodeInfo, err = mt.upgrade(c)
+				secretConn, nodeInfo, err = mt.upgrade(c, nil)
+				if err == nil {
+					addr := c.RemoteAddr()
+					id := PubKeyToID(secretConn.RemotePubKey())
+					netAddr = NewNetAddress(id, addr)
+				}
 			}

 			select {
-			case mt.acceptc <- accept{secretConn, nodeInfo, err}:
+			case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}:
 				// Make the upgraded peer available.
 			case <-mt.closec:
 				// Give up if the transport was closed.
@@ -279,9 +296,9 @@ func (mt *MultiplexTransport) acceptPeers() {

 // Cleanup removes the given address from the connections set and
 // closes the connection.
-func (mt *MultiplexTransport) Cleanup(peer Peer) {
-	mt.conns.RemoveAddr(peer.RemoteAddr())
-	_ = peer.CloseConn()
+func (mt *MultiplexTransport) Cleanup(p Peer) {
+	mt.conns.RemoveAddr(p.RemoteAddr())
+	_ = p.CloseConn()
 }

 func (mt *MultiplexTransport) cleanup(c net.Conn) error {
@@ -335,6 +352,7 @@ func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) {

 func (mt *MultiplexTransport) upgrade(
 	c net.Conn,
+	dialedAddr *NetAddress,
 ) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) {
 	defer func() {
 		if err != nil {
@@ -351,6 +369,23 @@ func (mt *MultiplexTransport) upgrade(
 		}
 	}

+	// For outgoing conns, ensure connection key matches dialed key.
+	connID := PubKeyToID(secretConn.RemotePubKey())
+	if dialedAddr != nil {
+		if dialedID := dialedAddr.ID; connID != dialedID {
+			return nil, nil, ErrRejected{
+				conn: c,
+				id:   connID,
+				err: fmt.Errorf(
+					"conn.ID (%v) dialed ID (%v) missmatch",
+					connID,
+					dialedID,
+				),
+				isAuthFailure: true,
+			}
+		}
+	}
+
 	nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo)
 	if err != nil {
 		return nil, nil, ErrRejected{
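The upgrade path now authenticates outbound dials: the ID derived from the remote's public key must equal the ID embedded in the address that was dialed. A standalone sketch of the same check (types simplified; `deriveID` stands in for `PubKeyToID`, and the hash here is illustrative rather than Tendermint's exact scheme):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// deriveID stands in for PubKeyToID: an ID is a short fingerprint
// of the peer's public key.
func deriveID(pubKey []byte) string {
	sum := sha256.Sum256(pubKey)
	return hex.EncodeToString(sum[:20])
}

// checkDialedID mirrors the added upgrade() logic: for outbound dials
// (dialedID != ""), the authenticated key must belong to the identity
// we meant to reach; inbound connections skip the check.
func checkDialedID(dialedID string, remotePubKey []byte) error {
	if dialedID == "" {
		return nil // inbound: nothing was dialed
	}
	if connID := deriveID(remotePubKey); connID != dialedID {
		return fmt.Errorf("auth failure: conn.ID (%v) dialed ID (%v) mismatch", connID, dialedID)
	}
	return nil
}

func main() {
	key := []byte("remote-ed25519-pubkey")
	fmt.Println(checkDialedID(deriveID(key), key)) // <nil>
	fmt.Println(checkDialedID("deadbeef", key))    // auth failure
}
```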
@@ -369,7 +404,7 @@ func (mt *MultiplexTransport) upgrade(
 	}

 	// Ensure connection key matches self reported key.
-	if connID := PubKeyToID(secretConn.RemotePubKey()); connID != nodeInfo.ID() {
+	if connID != nodeInfo.ID() {
 		return nil, nil, ErrRejected{
 			conn: c,
 			id:   connID,
@@ -408,14 +443,14 @@ func (mt *MultiplexTransport) wrapPeer(
 	c net.Conn,
 	ni NodeInfo,
 	cfg peerConfig,
-	dialedAddr *NetAddress,
+	socketAddr *NetAddress,
 ) Peer {

 	peerConn := newPeerConn(
 		cfg.outbound,
 		cfg.persistent,
 		c,
-		dialedAddr,
+		socketAddr,
 	)

 	p := newPeer(
p2p/transport_test.go
@@ -8,6 +8,8 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/require"
+
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/p2p/conn"
 )
@@ -142,44 +144,23 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {

 func TestTransportMultiplexAcceptMultiple(t *testing.T) {
 	mt := testSetupMultiplexTransport(t)
+	id, addr := mt.nodeKey.ID(), mt.listener.Addr().String()
+	laddr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, addr))
+	require.NoError(t, err)

 	var (
-		seed = rand.New(rand.NewSource(time.Now().UnixNano()))
-		errc = make(chan error, seed.Intn(64)+64)
+		seed     = rand.New(rand.NewSource(time.Now().UnixNano()))
+		nDialers = seed.Intn(64) + 64
+		errc     = make(chan error, nDialers)
 	)

 	// Setup dialers.
-	for i := 0; i < cap(errc); i++ {
-		go func() {
-			var (
-				pv     = ed25519.GenPrivKey()
-				dialer = newMultiplexTransport(
-					testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
-					NodeKey{
-						PrivKey: pv,
-					},
-				)
-			)
-
-			addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
-			if err != nil {
-				errc <- err
-				return
-			}
-
-			_, err = dialer.Dial(*addr, peerConfig{})
-			if err != nil {
-				errc <- err
-				return
-			}
-
-			// Signal that the connection was established.
-			errc <- nil
-		}()
+	for i := 0; i < nDialers; i++ {
+		go testDialer(*laddr, errc)
 	}

 	// Catch connection errors.
-	for i := 0; i < cap(errc); i++ {
+	for i := 0; i < nDialers; i++ {
 		if err := <-errc; err != nil {
 			t.Fatal(err)
 		}
@@ -217,6 +198,27 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) {
 	}
 }

+func testDialer(dialAddr NetAddress, errc chan error) {
+	var (
+		pv     = ed25519.GenPrivKey()
+		dialer = newMultiplexTransport(
+			testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName),
+			NodeKey{
+				PrivKey: pv,
+			},
+		)
+	)
+
+	_, err := dialer.Dial(dialAddr, peerConfig{})
+	if err != nil {
+		errc <- err
+		return
+	}
+
+	// Signal that the connection was established.
+	errc <- nil
+}
+
 func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
 	mt := testSetupMultiplexTransport(t)
@@ -230,7 +232,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {

 	// Simulate slow Peer.
 	go func() {
-		addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
+		addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
 		if err != nil {
 			errc <- err
 			return
@@ -281,8 +283,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
 			},
 		)
 	)

-		addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
+		addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
 		if err != nil {
 			errc <- err
 			return
@@ -328,7 +329,7 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) {
 		)
 	)

-		addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
+		addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
 		if err != nil {
 			errc <- err
 			return
@@ -371,8 +372,7 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
 			PrivKey: ed25519.GenPrivKey(),
 		},
 	)

-	addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
+	addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
 	if err != nil {
 		errc <- err
 		return
@@ -401,6 +401,38 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) {
 	}
 }

+func TestTransportMultiplexDialRejectWrongID(t *testing.T) {
+	mt := testSetupMultiplexTransport(t)
+
+	var (
+		pv     = ed25519.GenPrivKey()
+		dialer = newMultiplexTransport(
+			testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty
+			NodeKey{
+				PrivKey: pv,
+			},
+		)
+	)
+
+	wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey())
+	addr, err := NewNetAddressStringWithOptionalID(IDAddressString(wrongID, mt.listener.Addr().String()))
+	if err != nil {
+		t.Fatalf("invalid address with ID: %v", err)
+	}
+
+	_, err = dialer.Dial(*addr, peerConfig{})
+	if err != nil {
+		t.Logf("connection failed: %v", err)
+		if err, ok := err.(ErrRejected); ok {
+			if !err.IsAuthFailure() {
+				t.Errorf("expected auth failure")
+			}
+		} else {
+			t.Errorf("expected ErrRejected")
+		}
+	}
+}
+
 func TestTransportMultiplexRejectIncompatible(t *testing.T) {
 	mt := testSetupMultiplexTransport(t)

@@ -416,8 +448,7 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) {
 			},
 		)
 	)

-	addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
+	addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
 	if err != nil {
 		errc <- err
 		return
@@ -448,7 +479,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
 	errc := make(chan error)

 	go func() {
-		addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String())
+		addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String()))
 		if err != nil {
 			errc <- err
 			return
@@ -466,7 +497,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
 	if err := <-errc; err != nil {
 		if err, ok := err.(ErrRejected); ok {
 			if !err.IsSelf() {
-				t.Errorf("expected to reject self")
+				t.Errorf("expected to reject self, got: %v", err)
 			}
 		} else {
 			t.Errorf("expected ErrRejected")
@@ -478,7 +509,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) {
 	_, err := mt.Accept(peerConfig{})
 	if err, ok := err.(ErrRejected); ok {
 		if !err.IsSelf() {
-			t.Errorf("expected to reject self")
+			t.Errorf("expected to reject self, got: %v", err)
 		}
 	} else {
 		t.Errorf("expected ErrRejected")
@@ -563,12 +594,14 @@ func TestTransportHandshake(t *testing.T) {
 	}
 }

+// create listener
 func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
 	var (
-		pv = ed25519.GenPrivKey()
+		pv = ed25519.GenPrivKey()
+		id = PubKeyToID(pv.PubKey())
 		mt = newMultiplexTransport(
 			testNodeInfo(
-				PubKeyToID(pv.PubKey()), "transport",
+				id, "transport",
 			),
 			NodeKey{
 				PrivKey: pv,
@@ -576,7 +609,7 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport {
 		)
 	)

-	addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0")
+	addr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, "127.0.0.1:0"))
 	if err != nil {
 		t.Fatal(err)
 	}
privval/file.go
@@ -31,7 +31,6 @@ func voteToStep(vote *types.Vote) int8 {
 		return stepPrecommit
 	default:
 		panic("Unknown vote type")
-		return 0
 	}
 }
@@ -67,7 +67,7 @@ func DialTCPFn(addr string, connTimeout time.Duration, privKey ed25519.PrivKeyEd
 // DialUnixFn dials the given unix socket.
 func DialUnixFn(addr string) Dialer {
 	return func() (net.Conn, error) {
-		unixAddr := &net.UnixAddr{addr, "unix"}
+		unixAddr := &net.UnixAddr{Name: addr, Net: "unix"}
 		return net.DialUnix("unix", nil, unixAddr)
 	}
 }
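Switching from positional to named fields is what `go vet`'s composites check asks for: the literal keeps compiling and stays correct even if the struct gains or reorders fields. A small self-contained illustration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Unkeyed form (&net.UnixAddr{"/tmp/sock", "unix"}) depends on field
	// order and is flagged by vet's composites check for imported types.
	// Keyed fields survive reordering or insertion of new fields.
	addr := &net.UnixAddr{Name: "/tmp/sock", Net: "unix"}
	fmt.Println(addr.Network(), addr.String()) // unix /tmp/sock
}
```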
rpc/client/main_test.go
@@ -15,10 +15,10 @@ func TestMain(m *testing.M) {
 	// start a tendermint node (and kvstore) in the background to test against
 	app := kvstore.NewKVStoreApplication()
 	node = rpctest.StartTendermint(app)

 	code := m.Run()

 	// and shut down proper at the end
-	node.Stop()
-	node.Wait()
+	rpctest.StopTendermint(node)
 	os.Exit(code)
 }
rpc/client/mock/abci.go
@@ -23,7 +23,7 @@ var (
 )

 func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
-	return &ctypes.ResultABCIInfo{a.App.Info(proxy.RequestInfo)}, nil
+	return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil
 }

 func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
@@ -37,7 +37,7 @@ func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clien
 		Height: opts.Height,
 		Prove:  opts.Prove,
 	})
-	return &ctypes.ResultABCIQuery{q}, nil
+	return &ctypes.ResultABCIQuery{Response: q}, nil
 }

 // NOTE: Caller should call a.App.Commit() separately,
@@ -60,7 +60,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
 	if !c.IsErr() {
 		go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
 	}
-	return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
+	return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil
 }

 func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
@@ -69,7 +69,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
 	if !c.IsErr() {
 		go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
 	}
-	return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
+	return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil
 }

 // ABCIMock will send all abci related request to the named app,
@@ -87,7 +87,7 @@ func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil
+	return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil
 }

 func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
@@ -100,7 +100,7 @@ func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts clie
 		return nil, err
 	}
 	resQuery := res.(abci.ResponseQuery)
-	return &ctypes.ResultABCIQuery{resQuery}, nil
+	return &ctypes.ResultABCIQuery{Response: resQuery}, nil
 }

 func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
rpc/core/core_test.go
@@ -42,9 +42,9 @@ func TestCorsEnabled(t *testing.T) {
 	req.Header.Set("Origin", origin)
 	c := &http.Client{}
 	resp, err := c.Do(req)
+	require.Nil(t, err, "%+v", err)
 	defer resp.Body.Close()

-	require.Nil(t, err, "%+v", err)
 	assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin)
 }
rpc/core/abci.go
@@ -63,7 +63,7 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctype
 		return nil, err
 	}
 	logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery)
-	return &ctypes.ResultABCIQuery{*resQuery}, nil
+	return &ctypes.ResultABCIQuery{Response: *resQuery}, nil
 }

 // Get some info about the application.
@@ -101,5 +101,5 @@ func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultABCIInfo{*resInfo}, nil
+	return &ctypes.ResultABCIInfo{Response: *resInfo}, nil
 }
rpc/core/blocks.go
@@ -85,7 +85,9 @@ func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, e
 		blockMetas = append(blockMetas, blockMeta)
 	}

-	return &ctypes.ResultBlockchainInfo{blockStore.Height(), blockMetas}, nil
+	return &ctypes.ResultBlockchainInfo{
+		LastHeight: blockStore.Height(),
+		BlockMetas: blockMetas}, nil
 }

 // error if either min or max are negative or min < max
@@ -233,7 +235,7 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) {

 	blockMeta := blockStore.LoadBlockMeta(height)
 	block := blockStore.LoadBlock(height)
-	return &ctypes.ResultBlock{blockMeta, block}, nil
+	return &ctypes.ResultBlock{BlockMeta: blockMeta, Block: block}, nil
 }

 // Get block commit at a given height.
rpc/core/consensus.go
@@ -60,7 +60,9 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultValidators{height, validators.Validators}, nil
+	return &ctypes.ResultValidators{
+		BlockHeight: height,
+		Validators:  validators.Validators}, nil
 }

 // DumpConsensusState dumps consensus state.
@@ -213,7 +215,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
 		}
 		peerStates[i] = ctypes.PeerStateInfo{
 			// Peer basic info.
-			NodeAddress: peer.NodeInfo().NetAddress().String(),
+			NodeAddress: peer.SocketAddr().String(),
 			// Peer consensus state.
 			PeerState: peerStateJSON,
 		}
@@ -223,7 +225,9 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultDumpConsensusState{roundState, peerStates}, nil
+	return &ctypes.ResultDumpConsensusState{
+		RoundState: roundState,
+		Peers:      peerStates}, nil
 }

 // ConsensusState returns a concise summary of the consensus state.
@@ -276,7 +280,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
 func ConsensusState() (*ctypes.ResultConsensusState, error) {
 	// Get self round state.
 	bz, err := consensusState.GetRoundStateSimpleJSON()
-	return &ctypes.ResultConsensusState{bz}, err
+	return &ctypes.ResultConsensusState{RoundState: bz}, err
 }

 // Get the consensus parameters at the given block height.
@@ -327,5 +331,7 @@ func ConsensusParams(heightPtr *int64) (*ctypes.ResultConsensusParams, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &ctypes.ResultConsensusParams{BlockHeight: height, ConsensusParams: consensusparams}, nil
+	return &ctypes.ResultConsensusParams{
+		BlockHeight:     height,
+		ConsensusParams: consensusparams}, nil
 }
rpc/core/events.go
@@ -109,7 +109,7 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri

 	go func() {
 		for event := range ch {
-			tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)}
+			tmResult := &ctypes.ResultEvent{Query: query, Data: event.(tmtypes.TMEventData)}
 			wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", wsCtx.Request.ID)), tmResult))
 		}
 	}()
rpc/core/mempool.go
@@ -275,7 +275,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
 	limit = validatePerPage(limit)

 	txs := mempool.ReapMaxTxs(limit)
-	return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil
+	return &ctypes.ResultUnconfirmedTxs{N: len(txs), Txs: txs}, nil
 }

 // Get number of unconfirmed transactions.
rpc/core/net.go
@@ -29,21 +29,133 @@ import (
 //
 // ```json
 // {
-//   "error": "",
-//   "result": {
-//     "n_peers": "0",
-//     "peers": [],
-//     "listeners": [
-//       "Listener(@10.0.2.15:26656)"
-//     ],
-//     "listening": true
-//   },
-//   "id": "",
-//   "jsonrpc": "2.0"
+//   "jsonrpc": "2.0",
+//   "id": "",
+//   "result": {
+//     "listening": true,
+//     "listeners": [
+//       "Listener(@)"
+//     ],
+//     "n_peers": "3",
+//     "peers": [
+//       {
+//         "node_info": {
+//           "protocol_version": {
+//             "p2p": "7",
+//             "block": "8",
+//             "app": "1"
+//           },
+//           "id": "93529da3435c090d02251a050342b6a488d4ab56",
+//           "listen_addr": "tcp://0.0.0.0:26656",
+//           "network": "chain-RFo6qC",
+//           "version": "0.30.0",
+//           "channels": "4020212223303800",
+//           "moniker": "fc89e4ed23f2",
+//           "other": {
+//             "tx_index": "on",
+//             "rpc_address": "tcp://0.0.0.0:26657"
+//           }
+//         },
+//         "is_outbound": true,
+//         "connection_status": {
+//           "Duration": "3475230558",
+//           "SendMonitor": {
+//             "Active": true,
+//             "Start": "2019-02-14T12:40:47.52Z",
+//             "Duration": "3480000000",
+//             "Idle": "240000000",
+//             "Bytes": "4512",
+//             "Samples": "9",
+//             "InstRate": "1338",
+//             "CurRate": "2046",
+//             "AvgRate": "1297",
+//             "PeakRate": "6570",
+//             "BytesRem": "0",
+//             "TimeRem": "0",
+//             "Progress": 0
+//           },
+//           "RecvMonitor": {
+//             "Active": true,
+//             "Start": "2019-02-14T12:40:47.52Z",
+//             "Duration": "3480000000",
+//             "Idle": "280000000",
+//             "Bytes": "4489",
+//             "Samples": "10",
+//             "InstRate": "1821",
+//             "CurRate": "1663",
+//             "AvgRate": "1290",
+//             "PeakRate": "5512",
+//             "BytesRem": "0",
+//             "TimeRem": "0",
+//             "Progress": 0
+//           },
+//           "Channels": [
+//             {
+//               "ID": 48,
+//               "SendQueueCapacity": "1",
+//               "SendQueueSize": "0",
+//               "Priority": "5",
+//               "RecentlySent": "0"
+//             },
+//             {
+//               "ID": 64,
+//               "SendQueueCapacity": "1000",
+//               "SendQueueSize": "0",
+//               "Priority": "10",
+//               "RecentlySent": "14"
+//             },
+//             {
+//               "ID": 32,
+//               "SendQueueCapacity": "100",
+//               "SendQueueSize": "0",
+//               "Priority": "5",
+//               "RecentlySent": "619"
+//             },
+//             {
+//               "ID": 33,
+//               "SendQueueCapacity": "100",
+//               "SendQueueSize": "0",
+//               "Priority": "10",
+//               "RecentlySent": "1363"
+//             },
+//             {
+//               "ID": 34,
+//               "SendQueueCapacity": "100",
+//               "SendQueueSize": "0",
+//               "Priority": "5",
+//               "RecentlySent": "2145"
+//             },
+//             {
+//               "ID": 35,
+//               "SendQueueCapacity": "2",
+//               "SendQueueSize": "0",
+//               "Priority": "1",
+//               "RecentlySent": "0"
+//             },
+//             {
+//               "ID": 56,
+//               "SendQueueCapacity": "1",
+//               "SendQueueSize": "0",
+//               "Priority": "5",
+//               "RecentlySent": "0"
+//             },
+//             {
+//               "ID": 0,
+//               "SendQueueCapacity": "10",
+//               "SendQueueSize": "0",
+//               "Priority": "1",
+//               "RecentlySent": "10"
+//             }
+//           ]
+//         },
+//         "remote_ip": "192.167.10.3"
+//       },
+//       ...
 // }
 // ```
 func NetInfo() (*ctypes.ResultNetInfo, error) {
-	peers := []ctypes.Peer{}
+	out, in, _ := p2pPeers.NumPeers()
+	peers := make([]ctypes.Peer, 0, out+in)
 	for _, peer := range p2pPeers.Peers().List() {
 		nodeInfo, ok := peer.NodeInfo().(p2p.DefaultNodeInfo)
 		if !ok {
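Preallocating the peers slice with `make([]T, 0, n)` avoids repeated reallocation as `append` grows the backing array. A small standalone sketch of the pattern (the counts here are illustrative):

```go
package main

import "fmt"

func main() {
	out, in := 12, 30 // e.g. counts returned by NumPeers()

	// Capacity hint: length 0, capacity out+in, so appends never
	// reallocate as long as the final count stays within the hint.
	peers := make([]string, 0, out+in)
	for i := 0; i < out+in; i++ {
		peers = append(peers, fmt.Sprintf("peer-%d", i))
	}
	fmt.Println(len(peers), cap(peers)) // 42 42
}
```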
@@ -53,7 +165,7 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
 			NodeInfo:         nodeInfo,
 			IsOutbound:       peer.IsOutbound(),
 			ConnectionStatus: peer.Status(),
-			RemoteIP:         peer.RemoteIP(),
+			RemoteIP:         peer.RemoteIP().String(),
 		})
 	}
 	// TODO: Should we include PersistentPeers and Seeds in here?
@@ -77,7 +189,7 @@ func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
 	if err != nil {
 		return &ctypes.ResultDialSeeds{}, err
 	}
-	return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil
+	return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
 }

 func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
@@ -90,7 +202,7 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers,
 	if err != nil {
 		return &ctypes.ResultDialPeers{}, err
 	}
-	return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil
+	return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
 }

 // Get genesis file.
@@ -136,5 +248,5 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers,
 // }
 // ```
 func Genesis() (*ctypes.ResultGenesis, error) {
-	return &ctypes.ResultGenesis{genDoc}, nil
+	return &ctypes.ResultGenesis{Genesis: genDoc}, nil
 }
rpc/core/types/responses.go
@@ -2,7 +2,6 @@ package core_types

 import (
 	"encoding/json"
-	"net"
 	"time"

 	abci "github.com/tendermint/tendermint/abci/types"
@@ -111,7 +110,7 @@ type Peer struct {
 	NodeInfo         p2p.DefaultNodeInfo  `json:"node_info"`
 	IsOutbound       bool                 `json:"is_outbound"`
 	ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
-	RemoteIP         net.IP               `json:"remote_ip"`
+	RemoteIP         string               `json:"remote_ip"`
 }

 // Validators for a height
rpc/grpc/grpc_test.go
@@ -16,11 +16,11 @@ func TestMain(m *testing.M) {
 	// start a tendermint node in the background to test against
 	app := kvstore.NewKVStoreApplication()
 	node := rpctest.StartTendermint(app)

 	code := m.Run()

 	// and shut down proper at the end
-	node.Stop()
-	node.Wait()
+	rpctest.StopTendermint(node)
 	os.Exit(code)
 }
rpc/test/helpers.go
@@ -116,6 +116,14 @@ func StartTendermint(app abci.Application) *nm.Node {
 	return node
 }

+// StopTendermint stops a test tendermint server, waits until it's stopped and
+// cleans up test/config files.
+func StopTendermint(node *nm.Node) {
+	node.Stop()
+	node.Wait()
+	os.RemoveAll(node.Config().RootDir)
+}
+
 // NewTendermint creates a new tendermint server and sleeps forever
 func NewTendermint(app abci.Application) *nm.Node {
 	// Create & start node
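The new helper bundles the stop/wait/cleanup dance the RPC test mains previously inlined, and it relies on the `Node.Config()` accessor added in node/node.go above. A sketch of the resulting `TestMain` shape, following the test files in this diff:

```go
package client_test

import (
	"os"
	"testing"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	nm "github.com/tendermint/tendermint/node"
	rpctest "github.com/tendermint/tendermint/rpc/test"
)

var node *nm.Node

func TestMain(m *testing.M) {
	// Boot one shared node for the whole package's tests.
	app := kvstore.NewKVStoreApplication()
	node = rpctest.StartTendermint(app)

	code := m.Run()

	// One call now stops the node, waits, and removes its root dir.
	rpctest.StopTendermint(node)
	os.Exit(code)
}
```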
Some files were not shown because too many files have changed in this diff.