Compare commits

1 commit

Author: Zarko Milosevic
SHA1: 4accdb5f59
Message: Example of client send task and monitor task
Date: 2018-04-19 12:34:42 +02:00

951 changed files with 18332 additions and 104110 deletions

File diff suppressed because it is too large.


@@ -3,17 +3,10 @@ version: 2
defaults: &defaults
working_directory: /go/src/github.com/tendermint/tendermint
docker:
- image: circleci/golang:1.10.3
- image: circleci/golang:1.10.0
environment:
GOBIN: /tmp/workspace/bin
docs_update_config: &docs_update_config
working_directory: ~/repo
docker:
- image: tendermint/docs_deployment
environment:
AWS_REGION: us-east-1
jobs:
setup_dependencies:
<<: *defaults
@@ -23,7 +16,7 @@ jobs:
- checkout
- restore_cache:
keys:
- v3-pkg-cache
- v1-pkg-cache
- run:
name: tools
command: |
@@ -38,51 +31,51 @@ jobs:
name: binaries
command: |
export PATH="$GOBIN:$PATH"
make install install_abci
make install
- persist_to_workspace:
root: /tmp/workspace
paths:
- bin
- profiles
- save_cache:
key: v3-pkg-cache
key: v1-pkg-cache
paths:
- /go/pkg
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint
- save_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
paths:
- /go/src/github.com/tendermint/tendermint
build_slate:
setup_abci:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
key: v1-pkg-cache
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: tools
name: Checkout abci
command: |
export PATH="$GOBIN:$PATH"
make get_tools
commit=$(bash scripts/dep_utils/parse.sh abci)
go get -v -u -d github.com/tendermint/abci/...
cd /go/src/github.com/tendermint/abci
git checkout "$commit"
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: slate docs
working_directory: /go/src/github.com/tendermint/abci
name: Install abci
command: |
set -ex
export PATH="$GOBIN:$PATH"
make build-slate
make get_tools
make get_vendor_deps
make install
- run: ls -lah /tmp/workspace/bin
- persist_to_workspace:
root: /tmp/workspace
paths:
- "bin/abci*"
lint:
<<: *defaults
@@ -90,96 +83,15 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
make get_dev_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
key: v1-pkg-cache
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
- run:
name: check_dep
command: |
set -ex
export PATH="$GOBIN:$PATH"
make check_dep
test_abci_apps:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci apps tests
command: |
export PATH="$GOBIN:$PATH"
bash abci/tests/test_app/test.sh
# if this test fails, fix it and update the docs at:
# https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md
test_abci_cli:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci-cli tests
command: |
export PATH="$GOBIN:$PATH"
bash abci/tests/test_cli/test.sh
test_apps:
<<: *defaults
@@ -187,23 +99,9 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
key: v1-pkg-cache
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests
@@ -216,38 +114,21 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: mkdir -p /tmp/logs
key: v1-pkg-cache
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run tests
command: |
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
id=$(basename "$pkg")
GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg"
done
- persist_to_workspace:
root: /tmp/workspace
paths:
- "profiles/*"
- store_artifacts:
path: /tmp/logs
test_persistence:
<<: *defaults
@@ -255,49 +136,13 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
key: v1-pkg-cache
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh
localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost
test_p2p:
environment:
GOBIN: /home/circleci/.go_workspace/bin
@@ -308,31 +153,15 @@ jobs:
- checkout
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: bash test/p2p/circleci.sh
- store_artifacts:
path: /home/circleci/project/test/p2p/logs
- run: bash test/circleci/p2p.sh
upload_coverage:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: gather
command: |
@@ -344,49 +173,28 @@ jobs:
done
- run:
name: upload
command: bash .circleci/codecov.sh -f coverage.txt
deploy_docs:
<<: *docs_update_config
steps:
- checkout
- run:
name: Trigger website build
command: |
chamber exec tendermint -- start_website_build
command: bash <(curl -s https://codecov.io/bash) -f coverage.txt
workflows:
version: 2
test-suite:
jobs:
- deploy_docs:
filters:
branches:
only:
- master
- develop
- setup_dependencies
- setup_abci:
requires:
- setup_dependencies
- lint:
requires:
- setup_dependencies
- test_abci_apps:
requires:
- setup_dependencies
- test_abci_cli:
requires:
- setup_dependencies
- test_apps:
requires:
- setup_dependencies
- setup_abci
- test_cover:
requires:
- setup_dependencies
- test_persistence:
requires:
- setup_dependencies
- localnet:
requires:
- setup_dependencies
- setup_abci
- test_p2p
- upload_coverage:
requires:

.github/CODEOWNERS (7 lines changed)

@@ -1,7 +1,4 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/
# Everything goes through Bucky, Anton, Alex. For now.
* @ebuchman @melekes @xla
# Precious documentation
/docs/ @zramsay
# Everything goes through Bucky and Anton. For now.
* @ebuchman @melekes

.github/ISSUE_TEMPLATE (new file, 44 lines)

@@ -0,0 +1,44 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in a case of bug.
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Merkleeyes version** (use `git rev-parse --verify HEAD`, skip if you don't use it):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Logs (you can paste a part showing an error or attach the whole file)**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else do we need to know**:


@@ -1,42 +0,0 @@
---
name: Bug Report
about: Create a report to help us squash bugs!
---
<!--
Please fill in as much of the template below as you can.
Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**Have you tried the latest version**: yes/no
**How to reproduce it** (as minimally and precisely as possible):
**Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. containing more of the log file)**:
**Config (you can paste only the changes you've made)**:
**node command runtime flags**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else we need to know**:


@@ -1,13 +0,0 @@
---
name: Feature Request
about: Create a proposal to request a feature
---
<!--
Please describe *in detail* the feature/behavior/change you'd like to see.
Be ready for followup questions, and please respond in a timely
manner.
Word of caution: poorly thought out proposals may be rejected without deliberation
-->


@@ -3,4 +3,4 @@
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests
* [ ] Updated CHANGELOG_PENDING.md
* [ ] Updated CHANGELOG.md

.gitignore (25 lines changed)

@@ -5,6 +5,7 @@
.DS_Store
build/*
rpc/test/.tendermint
.debora
.tendermint
remote_dump
.revision
@@ -12,33 +13,15 @@ vendor
.vagrant
test/p2p/data/
test/logs
.glide
coverage.txt
docs/_build
docs/dist
docs/tools
docs/abci-spec.rst
*.log
abci-cli
docs/node_modules/
index.html.md
scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil
.idea/
*.iml
.vscode/
libs/pubsub/query/fuzz_test/output
shunit2
.tendermint-lite
addrbook.json
*/vendor
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
.vscode

File diff suppressed because it is too large.


@@ -1,23 +0,0 @@
## v0.27.2
*TBD*
Special thanks to external contributors on this release:
### BREAKING CHANGES:
* CLI/RPC/Config
* Apps
* Go API
* Blockchain Protocol
* P2P Protocol
### FEATURES:
### IMPROVEMENTS:
### BUG FIXES:


@@ -6,7 +6,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a
# Conduct
## Contact: conduct@tendermint.com
## Contact: adrian@tendermint.com
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.


@@ -17,7 +17,7 @@ Instead, we use `git remote` to add the fork as a new remote for the original re
For instance, to create a fork and work on a branch of it, I would:
* Create the fork on github, using the fork button.
* Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
* Go to the original repo checked out locally (ie. `$GOPATH/src/github.com/tendermint/tendermint`)
* `git remote rename origin upstream`
* `git remote add origin git@github.com:ebuchman/basecoin.git`
@@ -27,8 +27,8 @@ Of course, replace `ebuchman` with your git handle.
To pull in updates from the origin repo, run
* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)
* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)
Please don't make Pull Requests to `master`.
@@ -47,14 +47,9 @@ get_vendor_deps`). Even for dependencies under our control, dep helps us to
keep multiple repos in sync as they evolve. Anything with an executable, such
as apps, tools, and the core, should use dep.
Run `dep status` to get a list of vendor dependencies that may not be
Run `dep status` to get a list of vendored dependencies that may not be
up-to-date.
When updating dependencies, please only update the particular dependencies you
need. Instead of running `dep ensure -update`, which will update anything,
specify exactly the dependency you want to update, eg.
`dep ensure -update github.com/tendermint/go-amino`.
## Vagrant
If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
@@ -69,74 +64,43 @@ vagrant ssh
make test
```
## Changelog
## Testing
Every fix, improvement, feature, or breaking change should be made in a
pull-request that includes an update to the `CHANGELOG_PENDING.md` file.
All repos should be hooked up to [CircleCI](https://circleci.com/).
Changelog entries should be formatted as follows:
```
- [module] \#xxx Some description about the change (@contributor)
```
Here, `module` is the part of the code that changed (typically a
top-level Go package), `xxx` is the pull-request number, and `contributor`
is the author/s of the change.
It's also acceptable for `xxx` to refer to the relevent issue number, but pull-request
numbers are preferred.
Note this means pull-requests should be opened first so the changelog can then
be updated with the pull-request's number.
There is no need to include the full link, as this will be added
automatically during release. But please include the backslash and pound, eg. `\#2313`.
Changelog entries should be ordered alphabetically according to the
`module`, and numerically according to the pull-request number.
Changes with multiple classifications should be doubly included (eg. a bug fix
that is also a breaking change should be recorded under both).
Breaking changes are further subdivided according to the APIs/users they impact.
Any change that effects multiple APIs/users should be recorded multiply - for
instance, a change to the `Blockchain Protocol` that removes a field from the
header should also be recorded under `CLI/RPC/Config` since the field will be
removed from the header in rpc responses as well.
If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
## Branching Model and Release
All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
This means that all pull-requests should be made against develop. Any merge to
master constitutes a tagged release.
User-facing repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
That is, these repos should be well versioned, and any merge to master requires a version bump and tagged release.
Libraries need not follow the model strictly, but would be wise to,
especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint core.
### Development Procedure:
- the latest state of development is on `develop`
- `develop` must never fail `make test`
- never --force onto `develop` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- make changes and update the `CHANGELOG_PENDING.md` to record your change
- before submitting a pull request, run `git rebase` on top of the latest `develop`
- no --force onto `develop` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git add origin`)
- before submitting a pull request, begin `git rebase` on top of `develop`
### Pull Merge Procedure:
- ensure pull branch is based on a recent develop
- ensure pull branch is rebased on develop
- run `make test` to ensure that all tests pass
- merge pull request
- the `unstable` branch may be used to aggregate pull merges before fixing tests
- the `unstable` branch may be used to aggregate pull merges before testing once
- push master may request that pull requests be rebased on top of `unstable`
### Release Procedure:
- start on `develop`
- run integration tests (see `test_integrations` in Makefile)
- prepare changelog:
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash
./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- prepare changelog/release issue
- bump versions
- push to release/vX.X.X to run the extended integration tests on the CI
- push to release-vX.X.X to run the extended integration tests on the CI
- merge to master
- merge master back to develop
@@ -146,18 +110,8 @@ master constitutes a tagged release.
- make the required changes
- these changes should be small and an absolute necessity
- add a note to CHANGELOG.md
- bump versions
- bumb versions
- push to hotfix-vX.X.X to run the extended integration tests on the CI
- merge hotfix-vX.X.X to master
- merge hotfix-vX.X.X to develop
- delete the hotfix-vX.X.X branch
## Testing
All repos should be hooked up to [CircleCI](https://circleci.com/).
If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
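
Taken together, the fork-as-a-remote workflow described in the CONTRIBUTING.md hunks above is just a few git commands. The fork URL is the `ebuchman` example from the text; substitute your own handle:

```
# Keep the original repo as `upstream` and point `origin` at your fork.
cd "$GOPATH/src/github.com/tendermint/tendermint"
git remote rename origin upstream
git remote add origin git@github.com:ebuchman/basecoin.git

# Pull in updates from the original repo before opening a pull request.
git fetch upstream
git rebase upstream/master   # or whatever branch you want (PRs themselves go against develop)
```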

DOCKER/.gitignore (1 line changed)

@@ -1 +0,0 @@
tendermint


@@ -1,39 +1,45 @@
FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
# (unless you change `genesis_file` in config.toml). You can put your config.toml and
# private validator file into /tendermint/config.
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.17.1
ENV TM_SHA256SUM d57008c63d2d9176861137e38ed203da486febf20ae7d388fb810a75afff8f24
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private
# validator file into /tendermint.
#
# The /tendermint/data dir is used by tendermint to store state.
ENV TMHOME /tendermint
ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT
# Set user right away for determinism
RUN addgroup tmuser && \
adduser -S -G tmuser tmuser
# Create directory for persistence and give our user ownership
RUN mkdir -p $DATA_ROOT && \
chown -R tmuser:tmuser $DATA_ROOT
# OS environment setup
# Set user right away for determinism, create directory for persistence and give our user ownership
# jq and curl used for extracting `pub_key` from private validator while
# deploying tendermint with Kubernetes. It is nice to have bash so the users
# could execute bash commands.
RUN apk update && \
apk upgrade && \
apk --no-cache add curl jq bash && \
addgroup tmuser && \
adduser -S -G tmuser tmuser -h "$TMHOME"
RUN apk add --no-cache bash curl jq
# Run the container with tmuser by default. (UID=100, GID=1000)
USER tmuser
RUN apk add --no-cache openssl && \
wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
apk del openssl && \
rm -f tendermint_${TM_VERSION}_linux_amd64.zip
# Expose the data directory as a volume since there's mutable state in there
VOLUME [ $TMHOME ]
VOLUME $DATA_ROOT
WORKDIR $TMHOME
# p2p port
EXPOSE 46656
# rpc port
EXPOSE 46657
# p2p and rpc port
EXPOSE 26656 26657
ENTRYPOINT ["tendermint"]
ENTRYPOINT ["/usr/bin/tendermint"]
CMD ["node", "--moniker=`hostname`"]
STOPSIGNAL SIGTERM
ARG BINARY=tendermint
COPY $BINARY /usr/bin/tendermint
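
The two variants of this Dockerfile differ in how the binary gets in: one downloads a pinned release zip and verifies its sha256, the other COPYs in a locally built binary through `ARG BINARY`. A hedged sketch of the copy-in flow, assembled from the `build-docker` Makefile recipe and the README examples later in this diff (the `:local` tag is only illustrative):

```
# Build the linux/amd64 binary, stage it next to the Dockerfile, build the image.
make build-linux
cp build/tendermint DOCKER/tendermint
docker build --build-arg BINARY=tendermint -t tendermint/tendermint:local DOCKER

# Initialize a home directory and run a node with the built-in kvstore app,
# publishing the p2p and rpc ports this Dockerfile exposes.
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint:local init
docker run -it --rm -v "/tmp:/tendermint" -p 26656:26656 -p 26657:26657 \
    tendermint/tendermint:local node --proxy_app=kvstore
```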


@@ -1,23 +0,0 @@
FROM golang:latest
RUN mkdir -p /go/src/github.com/tendermint/abci
WORKDIR /go/src/github.com/tendermint/abci
COPY Makefile /go/src/github.com/tendermint/abci/
# see make protoc for details on ldconfig
RUN make get_protoc && ldconfig
# killall is used in tests
RUN apt-get update && apt-get install -y \
psmisc \
&& rm -rf /var/lib/apt/lists/*
COPY Gopkg.toml /go/src/github.com/tendermint/abci/
COPY Gopkg.lock /go/src/github.com/tendermint/abci/
RUN make get_tools
# see https://github.com/golang/dep/issues/1312
RUN dep ensure -vendor-only
COPY . /go/src/github.com/tendermint/abci


@@ -27,8 +27,8 @@ RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
VOLUME $DATA_ROOT
EXPOSE 26656
EXPOSE 26657
EXPOSE 46656
EXPOSE 46657
ENTRYPOINT ["tendermint"]


@@ -1,18 +0,0 @@
FROM golang:1.10.1
# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq bsdmainutils vim-common psmisc netcat
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends curl
VOLUME /go
EXPOSE 26656
EXPOSE 26657


@@ -7,9 +7,6 @@ push:
build_develop:
docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .
build_testing:
docker build --tag tendermint/testing -f ./Dockerfile.testing .
push_develop:
docker push "tendermint/tendermint:develop"


@@ -1,6 +1,4 @@
# Docker
## Supported tags and respective `Dockerfile` links
# Supported tags and respective `Dockerfile` links
- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
@@ -16,10 +14,10 @@
`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
## Quick reference
# Quick reference
* **Where to get help:**
https://cosmos.network/community
https://tendermint.com/community
* **Where to file issues:**
https://github.com/tendermint/tendermint/issues
@@ -27,7 +25,7 @@
* **Supported Docker versions:**
[the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)
## Tendermint
# Tendermint
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
@@ -35,33 +33,29 @@ For more background, see the [introduction](https://tendermint.readthedocs.io/en
To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html).
## How to use this image
# How to use this image
### Start one instance of the Tendermint core with the `kvstore` app
## Start one instance of the Tendermint core with the `kvstore` app
A quick example of a built-in app and Tendermint core in one container.
A very simple example of a built-in app and Tendermint core in one container.
```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
```
## Local cluster
## mintnet-kubernetes
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/tools/tree/master/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
```
make build-linux
make build-docker-localnode
make localnet-start
```
# License
Note that this will build and use a different image than the ones provided here.
View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
## License
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
# User Feedback
## Contributing
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.
Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.

Gopkg.lock (generated, 407 lines changed)

@@ -3,88 +3,57 @@
[[projects]]
branch = "master"
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
revision = "2be2f12b358dc57d70b8f501b00be450192efbc3"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"
name = "github.com/ebuchman/fail-test"
packages = ["."]
revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
[[projects]]
digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
name = "github.com/btcsuite/btcutil"
packages = [
"base58",
"bech32",
]
pruneopts = "UT"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"
packages = ["."]
pruneopts = "UT"
revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e"
version = "v1.2.0"
[[projects]]
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
name = "github.com/go-kit/kit"
packages = [
"log",
"log/level",
"log/term",
"metrics",
"metrics/discard",
"metrics/internal/lv",
"metrics/prometheus",
"log/term"
]
pruneopts = "UT"
revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
version = "v0.6.0"
[[projects]]
digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659"
name = "github.com/go-logfmt/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
name = "github.com/go-stack/stack"
packages = ["."]
pruneopts = "UT"
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
version = "v1.8.0"
revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
version = "v1.7.0"
[[projects]]
digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
@@ -92,256 +61,156 @@
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
"types",
"types"
]
pruneopts = "UT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
"ptypes/timestamp"
]
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "UT"
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = "UT"
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
branch = "master"
name = "github.com/hashicorp/hcl"
packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/printer",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token",
"json/token"
]
pruneopts = "UT"
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "UT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214"
branch = "master"
name = "github.com/jmhodges/levigo"
packages = ["."]
pruneopts = "UT"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
[[projects]]
branch = "master"
digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72"
name = "github.com/kr/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
name = "github.com/magiconair/properties"
packages = ["."]
pruneopts = "UT"
revision = "c2353362d570a7bfa228149c62842019201cfb71"
version = "v1.8.0"
revision = "c3beff4c2358b44d0493c7dda585e7db7ff28ae6"
version = "v1.7.6"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "UT"
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
revision = "00c29f56e2386353d58c599509e8dc3801b0d716"
[[projects]]
digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = "UT"
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
version = "v1.2.0"
revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
version = "v1.1.0"
[[projects]]
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:26663fafdea73a38075b07e8e9d82fc0056379d2be8bb4e13899e8fda7c7dd23"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/internal",
"prometheus/promhttp",
]
pruneopts = "UT"
revision = "abad2d1bd44235a26707c172eab6bca5bf2dbad3"
version = "v0.9.1"
[[projects]]
branch = "master"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "UT"
revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
[[projects]]
branch = "master"
digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs",
]
pruneopts = "UT"
revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
[[projects]]
digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
pruneopts = "UT"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
[[projects]]
digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758"
name = "github.com/rs/cors"
packages = ["."]
pruneopts = "UT"
revision = "9a47f48565a795472d43519dd49aac781f3034fb"
version = "v1.6.0"
[[projects]]
digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd"
name = "github.com/spf13/afero"
packages = [
".",
"mem",
"mem"
]
pruneopts = "UT"
revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
version = "v1.1.2"
revision = "63644898a8da0bc22138abf860edaf5277b6102e"
version = "v1.1.0"
[[projects]]
digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc"
name = "github.com/spf13/cast"
packages = ["."]
pruneopts = "UT"
revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
version = "v1.3.0"
revision = "8965335b8c7107321228e3e3702cab9832751bac"
version = "v1.2.0"
[[projects]]
digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "UT"
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
version = "v0.0.1"
revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
version = "v0.0.2"
[[projects]]
digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
branch = "master"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
pruneopts = "UT"
revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
version = "v1.0.0"
revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
[[projects]]
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
name = "github.com/spf13/viper"
packages = ["."]
pruneopts = "UT"
revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
version = "v1.0.0"
revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736"
version = "v1.0.2"
[[projects]]
digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require",
"require"
]
pruneopts = "UT"
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
branch = "master"
digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c"
name = "github.com/syndtr/goleveldb"
packages = [
"leveldb",
@@ -355,80 +224,106 @@
"leveldb/opt",
"leveldb/storage",
"leveldb/table",
"leveldb/util",
"leveldb/util"
]
pruneopts = "UT"
revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"
revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"
[[projects]]
digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
name = "github.com/tendermint/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
name = "github.com/tendermint/abci"
packages = [
"client",
"example/code",
"example/counter",
"example/kvstore",
"server",
"types"
]
revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
version = "v0.10.3"
[[projects]]
branch = "master"
name = "github.com/tendermint/ed25519"
packages = [
".",
"edwards25519",
"extra25519"
]
revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
[[projects]]
digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a"
name = "github.com/tendermint/go-amino"
packages = ["."]
pruneopts = "UT"
revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885"
version = "v0.14.1"
revision = "42246108ff925a457fb709475070a03dfd3e2b5c"
version = "0.9.6"
[[projects]]
digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf"
name = "github.com/tendermint/go-crypto"
packages = ["."]
revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
version = "v0.6.2"
[[projects]]
name = "github.com/tendermint/go-wire"
packages = ["."]
revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
version = "v0.7.3"
[[projects]]
name = "github.com/tendermint/tmlibs"
packages = [
"autofile",
"cli",
"cli/flags",
"clist",
"common",
"db",
"flowrate",
"log",
"merkle",
"pubsub",
"pubsub/query",
"test"
]
revision = "97e1f1ad3f510048929a51475811a18686c894df"
version = "0.8.2-rc0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"chacha20poly1305",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"hkdf",
"internal/chacha20",
"internal/subtle",
"nacl/box",
"nacl/secretbox",
"openpgp/armor",
"openpgp/errors",
"poly1305",
"ripemd160",
"salsa20/salsa",
"salsa20/salsa"
]
pruneopts = "UT"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
source = "github.com/tendermint/crypto"
revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e"
[[projects]]
digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"netutil",
"trace",
"lex/httplex",
"trace"
]
pruneopts = "UT"
revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
[[projects]]
branch = "master"
digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix",
]
pruneopts = "UT"
revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4"
packages = ["unix"]
revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
@@ -444,112 +339,50 @@
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
"unicode/rangetable"
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "UT"
revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313"
revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
[[projects]]
digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/grpcrand",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport",
"transport"
]
pruneopts = "UT"
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
version = "v1.13.0"
revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
version = "v1.7.5"
[[projects]]
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
"github.com/go-kit/kit/log/term",
"github.com/go-kit/kit/metrics",
"github.com/go-kit/kit/metrics/discard",
"github.com/go-kit/kit/metrics/prometheus",
"github.com/go-logfmt/logfmt",
"github.com/gogo/protobuf/gogoproto",
"github.com/gogo/protobuf/jsonpb",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/types",
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/gorilla/websocket",
"github.com/jmhodges/levigo",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/rcrowley/go-metrics",
"github.com/rs/cors",
"github.com/spf13/cobra",
"github.com/spf13/viper",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/syndtr/goleveldb/leveldb",
"github.com/syndtr/goleveldb/leveldb/errors",
"github.com/syndtr/goleveldb/leveldb/iterator",
"github.com/syndtr/goleveldb/leveldb/opt",
"github.com/tendermint/btcd/btcec",
"github.com/tendermint/go-amino",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/chacha20poly1305",
"golang.org/x/crypto/curve25519",
"golang.org/x/crypto/ed25519",
"golang.org/x/crypto/hkdf",
"golang.org/x/crypto/nacl/box",
"golang.org/x/crypto/nacl/secretbox",
"golang.org/x/crypto/openpgp/armor",
"golang.org/x/crypto/ripemd160",
"golang.org/x/net/context",
"golang.org/x/net/netutil",
"google.golang.org/grpc",
"google.golang.org/grpc/credentials",
]
inputs-digest = "e70f8692c825e80ae8510546e297840b9560d00e11b2272749a55cc2ffd147f0"
solver-name = "gps-cdcl"
solver-version = 1
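
The setup_abci CircleCI job near the top of this compare pins the abci checkout to whatever revision this lock file records, via `scripts/dep_utils/parse.sh abci`. That script is not shown in this diff; as a hedged, hypothetical stand-in, a lookup against the `[[projects]]` entries above could be as small as:

```
#!/usr/bin/env bash
# Hypothetical stand-in for scripts/dep_utils/parse.sh (the real script may
# differ): print the revision pinned in Gopkg.lock for a name fragment such
# as "abci".
set -euo pipefail

dep="$1"   # e.g. "abci" matches github.com/tendermint/abci

awk -v dep="$dep" '
  $1 == "name" && $3 ~ dep   { found = 1; next }
  found && $1 == "revision"  { gsub(/"/, "", $3); print $3; exit }
' Gopkg.lock
```

The CI job then feeds the printed revision into `git checkout` inside the abci working copy.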


@@ -10,6 +10,11 @@
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
@@ -18,92 +23,71 @@
# non-go = false
# go-tests = true
# unused-packages = true
#
###########################################################
# Allow only patch releases for serialization libraries
[[constraint]]
name = "github.com/tendermint/go-amino"
version = "~0.14.1"
[[constraint]]
name = "github.com/gogo/protobuf"
version = "~1.1.1"
[[constraint]]
name = "github.com/golang/protobuf"
version = "~1.1.0"
# Allow only minor releases for other libraries
[[constraint]]
name = "github.com/go-kit/kit"
version = "^0.6.0"
[[constraint]]
name = "github.com/gorilla/websocket"
version = "^1.2.0"
[[constraint]]
name = "github.com/rs/cors"
version = "^1.6.0"
[[constraint]]
name = "github.com/pkg/errors"
version = "^0.8.0"
[[constraint]]
name = "github.com/spf13/cobra"
version = "^0.0.1"
[[constraint]]
name = "github.com/spf13/viper"
version = "^1.0.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "^1.2.1"
[[constraint]]
name = "google.golang.org/grpc"
version = "^1.13.0"
name = "github.com/ebuchman/fail-test"
branch = "master"
[[constraint]]
name = "github.com/fortytw2/leaktest"
version = "^1.2.0"
branch = "master"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "^0.9.1"
###################################
## Some repos dont have releases.
## Pin to revision
name = "github.com/go-kit/kit"
version = "~0.6.0"
[[constraint]]
name = "golang.org/x/crypto"
source = "github.com/tendermint/crypto"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
[[override]]
name = "github.com/jmhodges/levigo"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
# last revision used by go-crypto
[[constraint]]
name = "github.com/btcsuite/btcutil"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
name = "github.com/gogo/protobuf"
version = "~1.0.0"
[[constraint]]
name = "github.com/tendermint/btcd"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
name = "github.com/golang/protobuf"
version = "~1.0.0"
[[constraint]]
name = "github.com/gorilla/websocket"
version = "~1.2.0"
[[constraint]]
name = "github.com/pkg/errors"
version = "~0.8.0"
[[constraint]]
name = "github.com/rcrowley/go-metrics"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
branch = "master"
[[constraint]]
name = "golang.org/x/net"
revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
name = "github.com/spf13/cobra"
version = "~0.0.1"
[[constraint]]
name = "github.com/spf13/viper"
version = "~1.0.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "~1.2.1"
[[constraint]]
name = "github.com/tendermint/abci"
version = "~0.10.3"
[[constraint]]
name = "github.com/tendermint/go-crypto"
version = "~0.6.2"
[[constraint]]
name = "github.com/tendermint/go-amino"
version = "~0.9.6"
[[constraint]]
name = "github.com/tendermint/tmlibs"
version = "~0.8.2-rc0"
[[constraint]]
name = "google.golang.org/grpc"
version = "~1.7.3"
[prune]
go-tests = true
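
These constraints are consumed through dep, which both the Makefile and CONTRIBUTING.md hunks in this compare wrap in a small workflow:

```
# Inspect and refresh vendored dependencies with dep.
dep status                     # list vendored deps that may be out of date
make get_vendor_deps           # CI path: dep ensure -vendor-only against Gopkg.lock
make ensure_deps               # local path: plain dep ensure

# Update one dependency at a time rather than `dep ensure -update` everything;
# the package below is the example used in CONTRIBUTING.md.
dep ensure -update github.com/tendermint/go-amino
```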

Makefile (207 lines changed)

@@ -1,74 +1,33 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/golang/dep/cmd/dep \
github.com/alecthomas/gometalinter \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/square/certstrap
GOBIN?=${GOPATH}/bin
PACKAGES=$(shell go list ./...)
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?='tendermint'
gopkg.in/alecthomas/gometalinter.v2
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s
all: check build test install
check: check_tools get_vendor_deps
check: check_tools ensure_deps
########################################
### Build Tendermint
### Build
build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
build_c:
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
build_race:
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
install_c:
CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint
########################################
### Protobuf
protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types
%.pb.go: %.proto
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
########################################
### Build ABCI
# see protobuf section above
protoc_abci: abci/types/types.pb.go
protoc_proto3types: types/proto3/block.pb.go
build_abci:
@go build -i ./abci/cmd/...
install_abci:
@go install ./abci/cmd/...
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
########################################
### Distribution
# dist builds binaries for all platforms and packages them for distribution
# TODO add abci to these scripts
dist:
@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
########################################
### Tools & dependencies
@@ -80,34 +39,26 @@ check_tools:
get_tools:
@echo "--> Installing tools"
./scripts/get_tools.sh
get_dev_tools:
@echo "--> Downloading linters (this may take awhile)"
$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)
go get -u -v $(GOTOOLS)
@gometalinter.v2 --install
update_tools:
@echo "--> Updating tools"
./scripts/get_tools.sh
@go get -u $(GOTOOLS)
#Update dependencies
#Run this from CI
get_vendor_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure -vendor-only
#Run this locally.
ensure_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure
#For ABCI and libs
get_protoc:
@# https://github.com/google/protobuf/releases
curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
cd protobuf-3.6.1 && \
DIST_LANG=cpp ./configure && \
make && \
make check && \
sudo make install && \
sudo ldconfig && \
cd .. && \
rm -rf protobuf-3.6.1
draw_deps:
@# requires brew install graphviz or apt-get install graphviz
go get github.com/RobotsAndPencils/goviz
@@ -115,38 +66,10 @@ draw_deps:
get_deps_bin_size:
@# Copy of build recipe with additional flags to perform binary size analysis
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1))
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
########################################
### Libs
protoc_libs: libs/common/types.pb.go
gen_certs: clean_certs
## Generating certificates for TLS testing...
certstrap init --common-name "tendermint.com" --passphrase ""
certstrap request-cert -ip "::" --passphrase ""
certstrap sign "::" --CA "tendermint.com" --passphrase ""
mv out/::.crt out/::.key db/remotedb
clean_certs:
## Cleaning TLS testing certificates...
rm -rf out
rm -f db/remotedb/::.crt db/remotedb/::.key
test_libs: gen_certs
GOCACHE=off go test -tags gcc $(PACKAGES)
make clean_certs
grpc_dbserver:
protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
protoc_grpc: rpc/grpc/types.pb.go
protoc_merkle: crypto/merkle/merkle.pb.go
########################################
### Testing
@@ -164,15 +87,6 @@ test_apps:
# requires `abci-cli` and `tendermint` binaries installed
bash test/app/test.sh
test_abci_apps:
bash abci/tests/test_app/test.sh
test_abci_cli:
# test the cli against the examples in the tutorial at:
# ./docs/abci-cli.md
# if test fails, update the docs ^
@ bash abci/tests/test_cli/test.sh
test_persistence:
# run the persistence tests using bash
# requires `abci-cli` installed
@@ -190,20 +104,18 @@ test_p2p:
cd ..
# requires 'tester' the image from above
bash test/p2p/test.sh tester
# the `docker cp` takes a really long time; uncomment for debugging
#
# mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs
need_abci:
bash scripts/install_abci_apps.sh
test_integrations:
make build_docker_test_image
make get_tools
make get_vendor_deps
make install
make need_abci
make test_cover
make test_apps
make test_abci_apps
make test_abci_cli
make test_libs
make test_persistence
make test_p2p
@@ -220,11 +132,11 @@ vagrant_test:
### go tests
test:
@echo "--> Running go test"
@GOCACHE=off go test -p 1 $(PACKAGES)
@go test $(PACKAGES)
test_race:
@echo "--> Running go test --race"
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)
@go test -v -race $(PACKAGES)
########################################
@@ -235,7 +147,7 @@ fmt:
metalinter:
@echo "--> Running linter"
@gometalinter $(LINT_FLAGS) --disable-all \
@gometalinter.v2 --vendor --deadline=600s --disable-all \
--enable=deadcode \
--enable=gosimple \
--enable=misspell \
@@ -264,25 +176,7 @@ metalinter:
metalinter_all:
@echo "--> Running linter (all)"
gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...
DESTINATION = ./index.html.md
rpc-docs:
cat rpc/core/slate_header.txt > $(DESTINATION)
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)
check_dep:
dep status >> /dev/null
!(grep -n branch Gopkg.toml)
###########################################################
### Docker image
build-docker:
cp build/tendermint DOCKER/tendermint
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
rm -rf DOCKER/tendermint
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
###########################################################
### Local testnet using docker
@@ -291,45 +185,18 @@ build-docker:
build-linux:
GOOS=linux GOARCH=amd64 $(MAKE) build
build-docker-localnode:
cd networks/local
make
cd -
# Run a 4-node testnet locally
localnet-start: localnet-stop
@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
docker-start:
@echo "Wait until 'Attaching to node0, node1, node2, node3' message appears"
@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v `pwd`/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
docker-compose up
# Stop testnet
localnet-stop:
docker-stop:
docker-compose down
###########################################################
### Remote full-nodes (sentry) using terraform and ansible
# Server management
sentry-start:
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
@if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi
cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
@echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". (Public key of validator, chain ID, peer IP and node ID.)"
# Configuration management
sentry-config:
cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build
sentry-stop:
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
# meant for the CI, inspect script & adapt accordingly
build-slate:
bash scripts/slate.sh
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux docker-start docker-stop


@@ -8,8 +8,8 @@ Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.10.4-blue.svg)](https://github.com/moovweb/gvm)
[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org)
[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm)
[![Rocket.Chat](https://demo.rocket.chat/images/join-chat.svg)](https://cosmos.rocket.chat/)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
@@ -19,86 +19,58 @@ Branch | Tests | Coverage
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
_NOTE: This is alpha software. Please contact us if you intend to run it in production._
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
For protocol details, see [the specification](/docs/spec).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## A Note on Production Readiness
While Tendermint is being used in production in private, permissioned
environments, we are still working actively to harden and audit it in preparation
for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
We are also still making breaking changes to the protocol and the APIs.
Thus, we tag the releases as *alpha software*.
In any case, if you intend to run Tendermint in production,
please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org).
## Security
To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint)
For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)
For more information, from introduction to install to application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
## Minimum requirements
Requirement|Notes
---|---
Go version | Go1.10 or higher
Go version | Go1.9 or higher
## Documentation
## Install
Complete documentation can be found on the [website](https://tendermint.com/docs/).
To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads).
### Install
To install from source, you should be able to:
See the [install instructions](/docs/introduction/install.md)
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
### Quick Start
- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
For more details (or if it fails), [read the docs](https://tendermint.readthedocs.io/en/master/install.html).
## Resources
### Tendermint Core
For details about the blockchain data structures and the p2p protocols, see the
the [Tendermint specification](/docs/spec).
For details on using the software, see the [documentation](/docs/) which is also
hosted at: https://tendermint.com/docs/
### Tools
Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).
All resources involving the use of, building applications on, or developing for Tendermint can be found at [Read The Docs](https://tendermint.readthedocs.io/en/master/). Additional information about some - and eventually all - of the sub-projects below can be found at Read The Docs.
### Sub-projects
* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface
* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library
* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
### Tools
* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools)
### Applications
* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.com/ecosystem)
* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications)
### Research
### More
* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
* [Blog](https://blog.cosmos.network/tendermint/home)
* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
* [Cosmos Blog](https://blog.cosmos.network)
## Contributing
@@ -113,26 +85,16 @@ According to SemVer, anything in the public API can change at any time before ve
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the in-process Go APIs.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:
interfaces exposed to other processes (cli, rpc, p2p, etc.), as well as parts of the following packages:
- types
- rpc/client
- config
- node
- libs
- bech32
- common
- db
- errors
- log
Exported objects in these packages that are not covered by the versioning scheme
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any
time without notice. Functions, types, and values in any other package may also change at any time.
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time.
Functions, types, and values in any other package may also change at any time.
### Upgrades
@@ -145,8 +107,6 @@ data into the new chain.
However, any bump in the PATCH version should be compatible with existing histories
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).
For more information on upgrading, see [here](./UPGRADING.md)
## Code of Conduct
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).


@@ -1,23 +0,0 @@
# Roadmap
BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence
FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC
IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
- cache block parts for faster agreement after round changes
- propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto compiled serialization/deserialization code instead of go-wire reflection
BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety or liveness


@@ -1,72 +0,0 @@
# Security
As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards.
Here is a list of examples of the kinds of bugs we're most interested in:
## Specification
- Conceptual flaws
- Ambiguities, inconsistencies, or incorrect statements
- Mismatch between specification and implementation of any component
## Consensus
Assuming less than 1/3 of the voting power is Byzantine (malicious):
- Validation of blockchain data structures, including blocks, block parts,
votes, and so on
- Execution of blocks
- Validator set changes
- Proposer round robin
- Two nodes committing conflicting blocks for the same height (safety failure)
- A correct node signing conflicting votes
- A node halting (liveness failure)
- Syncing new and old nodes
## Networking
- Authenticated encryption (MITM, information leakage)
- Eclipse attacks
- Sybil attacks
- Long-range attacks
- Denial-of-Service
## RPC
- Write-access to anything besides sending transactions
- Denial-of-Service
- Leakage of secrets
## Denial-of-Service
Attacks may come through the P2P network or the RPC:
- Amplification attacks
- Resource abuse
- Deadlocks and race conditions
- Panics and unhandled errors
## Libraries
- Serialization (Amino)
- Reading/Writing files and databases
- Logging and monitoring
## Cryptography
- Elliptic curves for validator signatures
- Hash algorithms and Merkle trees for block validation
- Authenticated encryption for P2P connections
## Light Client
- Validation of blockchain data structures
- Correctly validating an incorrect proof
- Incorrectly validating a correct proof
- Syncing validator set changes


@@ -1,193 +0,0 @@
# Upgrading Tendermint Core
This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.
## v0.27.0
This release contains some breaking changes to the block and p2p protocols,
but does not change any core data structures, so it should be compatible with
existing blockchains from the v0.26 series that only used Ed25519 validator keys.
Blockchains using Secp256k1 for validators will not be compatible. This is due
to the fact that we now enforce which key types validators can use as a
consensus param. The default is Ed25519, and Secp256k1 must be activated
explicitly.
It is recommended to upgrade all nodes at once to avoid incompatibilities at the
peer layer - namely, the heartbeat consensus message has been removed (only
relevant if `create_empty_blocks=false` or `create_empty_blocks_interval > 0`),
and the proposer selection algorithm has changed. Since proposer information is
never included in the blockchain, this change only affects the peer layer.
### Go API Changes
#### libs/db
The ReverseIterator API has changed the meaning of `start` and `end`.
Before, iteration was from `start` to `end`, where
`start > end`. Now, iteration is from `end` to `start`, where `start < end`.
The iterator also excludes `end`. This change allows a simplified and more
intuitive logic, aligning the semantic meaning of `start` and `end` in the
`Iterator` and `ReverseIterator`.
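As a rough illustration of the new semantics, here is a minimal sketch against the in-memory backend in `libs/db` (the keys and values are made up):
```
package main

import (
	"fmt"

	dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
	db := dbm.NewMemDB()
	db.Set([]byte("a"), []byte("1"))
	db.Set([]byte("b"), []byte("2"))
	db.Set([]byte("c"), []byte("3"))

	// New semantics: start < end, and `end` is exclusive.
	// This yields "c", "b", "a" (iteration runs from end down to start).
	it := db.ReverseIterator([]byte("a"), []byte("d"))
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
}
```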
### Applications
This release enforces a new consensus parameter, the
ValidatorParams.PubKeyTypes. Applications must ensure that they only return
validator updates with the allowed PubKeyTypes. If a validator update includes a
pubkey type that is not included in the ConsensusParams.Validator.PubKeyTypes,
block execution will fail and the consensus will halt.
By default, only Ed25519 pubkeys may be used for validators. Enabling
Secp256k1 requires explicit modification of the ConsensusParams.
Please update your application accordingly (ie. restrict validators to only be
able to use Ed25519 keys, or explicitly add additional key types to the genesis
file).
## v0.26.0
This release contains a lot of changes to core data types and protocols. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config Changes
All timeouts must be changed from integers to strings with their duration, for
instance `flush_throttle_timeout = 100` would be changed to
`flush_throttle_timeout = "100ms"` and `timeout_propose = 3000` would be changed
to `timeout_propose = "3s"`.
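The new string values follow Go's duration syntax; a quick way to sanity-check a value before putting it in the config (a minimal sketch, not part of Tendermint itself):
```
package main

import (
	"fmt"
	"time"
)

func main() {
	// "100ms" replaces the old integer 100, "3s" replaces the old 3000.
	for _, s := range []string{"100ms", "3s"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s parses to %v\n", s, d)
	}
}
```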
### RPC Changes
The default behaviour of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
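For example, a proved query over the HTTP RPC might look like this (a minimal sketch; the node address and the key `0x616263` are assumptions):
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// `prove=true` replaces the old `trusted` parameter.
	url := "http://127.0.0.1:26657/abci_query?data=0x616263&prove=true"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```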
Various version fields like `amino_version`, `p2p_version`, `consensus_version`,
and `rpc_version` have been removed from the `node_info.other` and are
consolidated under the tendermint semantic version (ie. `node_info.version`) and
the new `block` and `p2p` protocol versions under `node_info.protocol_version`.
### ABCI Changes
Field numbers were bumped in the `Header` and `ResponseInfo` messages to make
room for new `version` fields. It should be straightforward to recompile the
protobuf file for these changes.
#### Proofs
The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support
generalized Merkle tree constructions where the leaves of one Merkle tree are
the root of another. If you don't need this functionality, and you used to
return `<proof bytes>` here, you should instead return a single `ProofOp` with
just the `Data` field set:
```
[]ProofOp{
ProofOp{
Data: <proof bytes>,
}
}
```
For more information, see:
- [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md)
- [Relevant ABCI
documentation](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/spec/abci/apps.md#query-proofs)
- [Description of
keys](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/crypto/merkle/proof_key_path.go#L14)
### Go API Changes
#### crypto/merkle
The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
now simply take `[]byte`. This means that any objects being Merklized should be
serialized before they are passed in.
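A minimal sketch of the new pattern, assuming go-amino for serialization and the `SimpleHashFromByteSlices` helper in `crypto/merkle` (any deterministic encoding to `[]byte` works):
```
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/crypto/merkle"
)

type Item struct {
	Name  string
	Value int64
}

func main() {
	cdc := amino.NewCodec()
	items := []Item{{"a", 1}, {"b", 2}}

	// Serialize each object first, then hand the raw bytes to the merkle package.
	leaves := make([][]byte, len(items))
	for i, item := range items {
		bz, err := cdc.MarshalBinaryBare(item)
		if err != nil {
			panic(err)
		}
		leaves[i] = bz
	}
	fmt.Printf("root: %X\n", merkle.SimpleHashFromByteSlices(leaves))
}
```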
#### node
The `node.RunForever` function was removed. Signal handling and running forever
should instead be explicitly configured by the caller. See how we do it
[here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60).
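A minimal sketch of the caller-side handling (the commented node calls are placeholders for however your program constructs and starts its node):
```
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// ... construct and start the node here, e.g. n, _ := node.NewNode(...); n.Start()

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	sig := <-c // block until SIGINT or SIGTERM
	fmt.Println("caught", sig, "- shutting down")

	// ... n.Stop() and wait for any cleanup before exiting
}
```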
### Other
All hashes, except for public key addresses, are now 32-bytes.
## v0.25.0
This release has minimal impact.
If you use GasWanted in ABCI and want to enforce it, set the MaxGas in the genesis file (default is no max).
## v0.24.0
The 0.24.0 release contains a lot of changes to the state and types. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config changes
`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
`p2p.max_num_outbound_peers`.
```
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
```
As you can see, the default ratio of inbound/outbound peers is 4/1. The reason
is we want it to be easier for new nodes to connect to the network. You can
tweak these parameters to alter the network topology.
### RPC Changes
The result of `/commit` used to contain `header` and `commit` fields at the top level. These are now contained under the `signed_header` field.
### ABCI Changes
The header has been upgraded and contains new fields, but none of the existing
fields were changed, except their order.
The `Validator` type was split into two, one containing an `Address` and one
containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
type, which contains just the `Address`. When returning `ResponseEndBlock`, use
the `ValidatorUpdate` type, which contains just the `PubKey`.
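A minimal sketch of the two sides of this split, assuming the ABCI Go types of this release (the pubkey bytes and power are placeholders):
```
package main

import (
	"github.com/tendermint/tendermint/abci/types"
)

type App struct {
	types.BaseApplication
}

func (app *App) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	// Only validator addresses (plus power and signing info) arrive here.
	for _, vote := range req.LastCommitInfo.Votes {
		_ = vote.Validator.Address
	}
	return types.ResponseBeginBlock{}
}

func (app *App) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	// Updates are returned with the pubkey, not the address.
	pk := make([]byte, 32) // placeholder ed25519 pubkey bytes
	return types.ResponseEndBlock{
		ValidatorUpdates: []types.ValidatorUpdate{
			{PubKey: types.PubKey{Type: "ed25519", Data: pk}, Power: 10},
		},
	}
}

func main() {}
```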
### Validator Set Updates
Validator set updates returned in ResponseEndBlock for height `H` used to take
effect immediately at height `H+1`. Now they will be delayed one block, to take
effect at height `H+2`. Note this means that the change will be seen by the ABCI
app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
required to maintain a map from validator addresses to pubkeys since v0.23 (when
pubkeys were removed from RequestBeginBlock), but now they may need to track
multiple validator sets at once to accommodate this delay.
### Block Size
The `ConsensusParams.BlockSize.MaxTxs` was removed in favour of
`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
are limited only by byte size, not by number of transactions.

Vagrantfile

@@ -10,37 +10,31 @@ Vagrant.configure("2") do |config|
end
config.vm.provision "shell", inline: <<-SHELL
apt-get update
# add docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable"
# and golang 1.9 support
# official repo doesn't have race detection runtime...
# add-apt-repository ppa:gophers/archive
add-apt-repository ppa:longsleep/golang-backports
# install base requirements
apt-get update
apt-get install -y --no-install-recommends wget curl jq zip \
make shellcheck bsdmainutils psmisc
apt-get install -y docker-ce golang-1.9-go
apt-get install -y language-pack-en
# install docker
apt-get install -y --no-install-recommends apt-transport-https \
ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get install -y docker-ce
usermod -a -G docker vagrant
# install go
wget -q https://dl.google.com/go/go1.11.linux-amd64.tar.gz
tar -xvf go1.11.linux-amd64.tar.gz
mv go /usr/local
rm -f go1.11.linux-amd64.tar.gz
# cleanup
apt-get autoremove -y
# needed for docker
usermod -a -G docker vagrant
# set env variables
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
echo 'export PATH=$PATH:/usr/lib/go-1.9/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
@@ -53,6 +47,6 @@ Vagrant.configure("2") do |config|
# get all deps and tools, ready to install/test
su - vagrant -c 'source /home/vagrant/.bash_profile'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_dev_tools && make get_vendor_deps'
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
SHELL
end


@@ -1,37 +0,0 @@
# Application BlockChain Interface (ABCI)
Blockchains are systems for multi-master state machine replication.
**ABCI** is an interface that defines the boundary between the replication engine (the blockchain),
and the state machine (the application).
Using a socket protocol, a consensus engine running in one process
can manage an application state running in another.
Previously, the ABCI was referred to as TMSP.
The community has provided a number of additional implementations; see the [Tendermint Ecosystem](https://tendermint.com/ecosystem).
## Installation & Usage
To get up and running quickly, see the [getting started guide](../docs/app-dev/getting-started.md) along with the [abci-cli documentation](../docs/app-dev/abci-cli.md) which will go through the examples found in the [examples](./example/) directory.
## Specification
A detailed description of the ABCI methods and message types is contained in:
- [The main spec](../docs/spec/abci/abci.md)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)
## Protocol Buffers
To compile the protobuf file, run (from the root of the repo):
```
make protoc_abci
```
See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
for details on compiling for other languages. Note we also include a [GRPC](https://www.grpc.io/docs)
service definition.


@@ -1,129 +0,0 @@
package abcicli
import (
"fmt"
"sync"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
const (
dialRetryIntervalSeconds = 3
echoRetryIntervalSeconds = 1
)
// Client defines an interface for an ABCI client.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
// Note these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes and logs.
type Client interface {
cmn.Service
SetResponseCallback(Callback)
Error() error
FlushAsync() *ReqRes
EchoAsync(msg string) *ReqRes
InfoAsync(types.RequestInfo) *ReqRes
SetOptionAsync(types.RequestSetOption) *ReqRes
DeliverTxAsync(tx []byte) *ReqRes
CheckTxAsync(tx []byte) *ReqRes
QueryAsync(types.RequestQuery) *ReqRes
CommitAsync() *ReqRes
InitChainAsync(types.RequestInitChain) *ReqRes
BeginBlockAsync(types.RequestBeginBlock) *ReqRes
EndBlockAsync(types.RequestEndBlock) *ReqRes
FlushSync() error
EchoSync(msg string) (*types.ResponseEcho, error)
InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error)
CheckTxSync(tx []byte) (*types.ResponseCheckTx, error)
QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
CommitSync() (*types.ResponseCommit, error)
InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
}
//----------------------------------------
// NewClient returns a new ABCI client of the specified transport type.
// It returns an error if the transport is not "socket" or "grpc"
func NewClient(addr, transport string, mustConnect bool) (client Client, err error) {
switch transport {
case "socket":
client = NewSocketClient(addr, mustConnect)
case "grpc":
client = NewGRPCClient(addr, mustConnect)
default:
err = fmt.Errorf("Unknown abci transport %s", transport)
}
return
}
//----------------------------------------
type Callback func(*types.Request, *types.Response)
//----------------------------------------
type ReqRes struct {
*types.Request
*sync.WaitGroup
*types.Response // Not set atomically, so be sure to use WaitGroup.
mtx sync.Mutex
done bool // Gets set to true once *after* WaitGroup.Done().
cb func(*types.Response) // A single callback that may be set.
}
func NewReqRes(req *types.Request) *ReqRes {
return &ReqRes{
Request: req,
WaitGroup: waitGroup1(),
Response: nil,
done: false,
cb: nil,
}
}
// Sets the callback for this ReqRes atomically.
// If reqRes is already done, calls cb immediately.
// NOTE: reqRes.cb should not change if reqRes.done.
// NOTE: only one callback is supported.
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
reqRes.mtx.Lock()
if reqRes.done {
reqRes.mtx.Unlock()
cb(reqRes.Response)
return
}
reqRes.cb = cb
reqRes.mtx.Unlock()
}
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
reqRes.mtx.Lock()
defer reqRes.mtx.Unlock()
return reqRes.cb
}
// NOTE: it should be safe to read reqRes.cb without locks after this.
func (reqRes *ReqRes) SetDone() {
reqRes.mtx.Lock()
reqRes.done = true
reqRes.mtx.Unlock()
}
func waitGroup1() (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{}
wg.Add(1)
return
}


@@ -1,301 +0,0 @@
package abcicli
import (
"fmt"
"net"
"sync"
"time"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
var _ Client = (*grpcClient)(nil)
// A stripped copy of the remoteClient that makes
// synchronous calls using grpc
type grpcClient struct {
cmn.BaseService
mustConnect bool
client types.ABCIApplicationClient
conn *grpc.ClientConn
mtx sync.Mutex
addr string
err error
resCb func(*types.Request, *types.Response) // listens to all callbacks
}
func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
cli := &grpcClient{
addr: addr,
mustConnect: mustConnect,
}
cli.BaseService = *cmn.NewBaseService(nil, "grpcClient", cli)
return cli
}
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
return cmn.Connect(addr)
}
func (cli *grpcClient) OnStart() error {
if err := cli.BaseService.OnStart(); err != nil {
return err
}
RETRY_LOOP:
for {
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
if err != nil {
if cli.mustConnect {
return err
}
cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
time.Sleep(time.Second * dialRetryIntervalSeconds)
continue RETRY_LOOP
}
cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
client := types.NewABCIApplicationClient(conn)
cli.conn = conn
ENSURE_CONNECTED:
for {
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
if err == nil {
break ENSURE_CONNECTED
}
cli.Logger.Error("Echo failed", "err", err)
time.Sleep(time.Second * echoRetryIntervalSeconds)
}
cli.client = client
return nil
}
}
func (cli *grpcClient) OnStop() {
cli.BaseService.OnStop()
if cli.conn != nil {
cli.conn.Close()
}
}
func (cli *grpcClient) StopForError(err error) {
cli.mtx.Lock()
if !cli.IsRunning() {
cli.mtx.Unlock() // avoid leaking the mutex on early return
return
}
if cli.err == nil {
cli.err = err
}
cli.mtx.Unlock()
cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
cli.Stop()
}
func (cli *grpcClient) Error() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}
// Set listener for all responses
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
cli.resCb = resCb
cli.mtx.Unlock()
}
//----------------------------------------
// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
// (eg. the mempool expects to be able to lock to remove bad txs from cache).
// To accommodate, we finish each call in its own go-routine,
// which is expensive, but easy - if you want something better, use the socket protocol!
// maybe one day, if people really want it, we use grpc streams,
// but hopefully not :D
func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
req := types.ToRequestEcho(msg)
res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
}
func (cli *grpcClient) FlushAsync() *ReqRes {
req := types.ToRequestFlush()
res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
}
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
req := types.ToRequestInfo(params)
res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
}
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
req := types.ToRequestSetOption(params)
res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
}
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
req := types.ToRequestDeliverTx(tx)
res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
}
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
req := types.ToRequestCheckTx(tx)
res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
}
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
req := types.ToRequestQuery(params)
res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
}
func (cli *grpcClient) CommitAsync() *ReqRes {
req := types.ToRequestCommit()
res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
}
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
req := types.ToRequestInitChain(params)
res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
}
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
req := types.ToRequestBeginBlock(params)
res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
}
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
req := types.ToRequestEndBlock(params)
res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
}
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
reqres := NewReqRes(req)
reqres.Response = res // Set response
reqres.Done() // Release waiters
reqres.SetDone() // so reqRes.SetCallback will run the callback
// go routine for callbacks
go func() {
// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(res)
}
// Notify client listener if set
if cli.resCb != nil {
cli.resCb(reqres.Request, res)
}
}()
return reqres
}
//----------------------------------------
func (cli *grpcClient) FlushSync() error {
return nil
}
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
reqres := cli.EchoAsync(msg)
// StopForError should already have been called if error is set
return reqres.Response.GetEcho(), cli.Error()
}
func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
reqres := cli.InfoAsync(req)
return reqres.Response.GetInfo(), cli.Error()
}
func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
reqres := cli.SetOptionAsync(req)
return reqres.Response.GetSetOption(), cli.Error()
}
func (cli *grpcClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
reqres := cli.DeliverTxAsync(tx)
return reqres.Response.GetDeliverTx(), cli.Error()
}
func (cli *grpcClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
reqres := cli.CheckTxAsync(tx)
return reqres.Response.GetCheckTx(), cli.Error()
}
func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
reqres := cli.QueryAsync(req)
return reqres.Response.GetQuery(), cli.Error()
}
func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
reqres := cli.CommitAsync()
return reqres.Response.GetCommit(), cli.Error()
}
func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
reqres := cli.InitChainAsync(params)
return reqres.Response.GetInitChain(), cli.Error()
}
func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
reqres := cli.BeginBlockAsync(params)
return reqres.Response.GetBeginBlock(), cli.Error()
}
func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
reqres := cli.EndBlockAsync(params)
return reqres.Response.GetEndBlock(), cli.Error()
}


@@ -1,255 +0,0 @@
package abcicli
import (
"sync"
types "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
var _ Client = (*localClient)(nil)
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
// case of malicious tx or query). It only makes sense for publicly exposed
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct {
cmn.BaseService
mtx *sync.Mutex
types.Application
Callback
}
func NewLocalClient(mtx *sync.Mutex, app types.Application) *localClient {
if mtx == nil {
mtx = new(sync.Mutex)
}
cli := &localClient{
mtx: mtx,
Application: app,
}
cli.BaseService = *cmn.NewBaseService(nil, "localClient", cli)
return cli
}
func (app *localClient) SetResponseCallback(cb Callback) {
app.mtx.Lock()
app.Callback = cb
app.mtx.Unlock()
}
// TODO: change types.Application to include Error()?
func (app *localClient) Error() error {
return nil
}
func (app *localClient) FlushAsync() *ReqRes {
// Do nothing
return newLocalReqRes(types.ToRequestFlush(), nil)
}
func (app *localClient) EchoAsync(msg string) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
return app.callback(
types.ToRequestEcho(msg),
types.ToResponseEcho(msg),
)
}
func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
return app.callback(
types.ToRequestInfo(req),
types.ToResponseInfo(res),
)
}
func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.SetOption(req)
return app.callback(
types.ToRequestSetOption(req),
types.ToResponseSetOption(res),
)
}
func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(tx)
return app.callback(
types.ToRequestDeliverTx(tx),
types.ToResponseDeliverTx(res),
)
}
func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(tx)
return app.callback(
types.ToRequestCheckTx(tx),
types.ToResponseCheckTx(res),
)
}
func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
return app.callback(
types.ToRequestQuery(req),
types.ToResponseQuery(res),
)
}
func (app *localClient) CommitAsync() *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Commit()
return app.callback(
types.ToRequestCommit(),
types.ToResponseCommit(res),
)
}
func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.InitChain(req)
return app.callback(
types.ToRequestInitChain(req),
types.ToResponseInitChain(res),
)
}
func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
)
}
func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
)
}
//-------------------------------------------------------
func (app *localClient) FlushSync() error {
return nil
}
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
return &res, nil
}
func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.SetOption(req)
return &res, nil
}
func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(tx)
return &res, nil
}
func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(tx)
return &res, nil
}
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
return &res, nil
}
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Commit()
return &res, nil
}
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.InitChain(req)
return &res, nil
}
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
return &res, nil
}
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
return &res, nil
}
//-------------------------------------------------------
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
app.Callback(req, res)
return newLocalReqRes(req, res)
}
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
reqRes := NewReqRes(req)
reqRes.Response = res
reqRes.SetDone()
return reqRes
}


@@ -1,406 +0,0 @@
package abcicli
import (
"bufio"
"container/list"
"errors"
"fmt"
"net"
"reflect"
"sync"
"time"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
const reqQueueSize = 256 // TODO make configurable
// const maxResponseSize = 1048576 // 1MB TODO make configurable
const flushThrottleMS = 20 // Don't wait longer than...
var _ Client = (*socketClient)(nil)
// This is goroutine-safe, but users should beware that
// the application in general is not meant to be interfaced
// with concurrent callers.
type socketClient struct {
cmn.BaseService
reqQueue chan *ReqRes
flushTimer *cmn.ThrottleTimer
mustConnect bool
mtx sync.Mutex
addr string
conn net.Conn
err error
reqSent *list.List
resCb func(*types.Request, *types.Response) // listens to all callbacks
}
func NewSocketClient(addr string, mustConnect bool) *socketClient {
cli := &socketClient{
reqQueue: make(chan *ReqRes, reqQueueSize),
flushTimer: cmn.NewThrottleTimer("socketClient", flushThrottleMS),
mustConnect: mustConnect,
addr: addr,
reqSent: list.New(),
resCb: nil,
}
cli.BaseService = *cmn.NewBaseService(nil, "socketClient", cli)
return cli
}
func (cli *socketClient) OnStart() error {
if err := cli.BaseService.OnStart(); err != nil {
return err
}
var err error
var conn net.Conn
RETRY_LOOP:
for {
conn, err = cmn.Connect(cli.addr)
if err != nil {
if cli.mustConnect {
return err
}
cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr), "err", err)
time.Sleep(time.Second * dialRetryIntervalSeconds)
continue RETRY_LOOP
}
cli.conn = conn
go cli.sendRequestsRoutine(conn)
go cli.recvResponseRoutine(conn)
return nil
}
}
func (cli *socketClient) OnStop() {
cli.BaseService.OnStop()
cli.mtx.Lock()
defer cli.mtx.Unlock()
if cli.conn != nil {
cli.conn.Close()
}
cli.flushQueue()
}
// Stop the client and set the error
func (cli *socketClient) StopForError(err error) {
if !cli.IsRunning() {
return
}
cli.mtx.Lock()
if cli.err == nil {
cli.err = err
}
cli.mtx.Unlock()
cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
cli.Stop()
}
func (cli *socketClient) Error() error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
return cli.err
}
// Set listener for all responses
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
cli.resCb = resCb
cli.mtx.Unlock()
}
//----------------------------------------
func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {
w := bufio.NewWriter(conn)
for {
select {
case <-cli.flushTimer.Ch:
select {
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
default:
// Probably will fill the buffer, or retry later.
}
case <-cli.Quit():
return
case reqres := <-cli.reqQueue:
cli.willSendReq(reqres)
err := types.WriteMessage(reqres.Request, w)
if err != nil {
cli.StopForError(fmt.Errorf("Error writing msg: %v", err))
return
}
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
err = w.Flush()
if err != nil {
cli.StopForError(fmt.Errorf("Error flushing writer: %v", err))
return
}
}
}
}
}
func (cli *socketClient) recvResponseRoutine(conn net.Conn) {
r := bufio.NewReader(conn) // Buffer reads
for {
var res = &types.Response{}
err := types.ReadMessage(r, res)
if err != nil {
cli.StopForError(err)
return
}
switch r := res.Value.(type) {
case *types.Response_Exception:
// XXX After setting cli.err, release waiters (e.g. reqres.Done())
cli.StopForError(errors.New(r.Exception.Error))
return
default:
// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
err := cli.didRecvResponse(res)
if err != nil {
cli.StopForError(err)
return
}
}
}
}
func (cli *socketClient) willSendReq(reqres *ReqRes) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.reqSent.PushBack(reqres)
}
func (cli *socketClient) didRecvResponse(res *types.Response) error {
cli.mtx.Lock()
defer cli.mtx.Unlock()
// Get the first ReqRes
next := cli.reqSent.Front()
if next == nil {
return fmt.Errorf("Unexpected result type %v when nothing expected", reflect.TypeOf(res.Value))
}
reqres := next.Value.(*ReqRes)
if !resMatchesReq(reqres.Request, res) {
return fmt.Errorf("Unexpected result type %v when response to %v expected",
reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
}
reqres.Response = res // Set response
reqres.Done() // Release waiters
cli.reqSent.Remove(next) // Pop first item from linked list
// Notify reqRes listener if set
if cb := reqres.GetCallback(); cb != nil {
cb(res)
}
// Notify client listener if set
if cli.resCb != nil {
cli.resCb(reqres.Request, res)
}
return nil
}
//----------------------------------------
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
return cli.queueRequest(types.ToRequestEcho(msg))
}
func (cli *socketClient) FlushAsync() *ReqRes {
return cli.queueRequest(types.ToRequestFlush())
}
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
return cli.queueRequest(types.ToRequestInfo(req))
}
func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
return cli.queueRequest(types.ToRequestSetOption(req))
}
func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes {
return cli.queueRequest(types.ToRequestDeliverTx(tx))
}
func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes {
return cli.queueRequest(types.ToRequestCheckTx(tx))
}
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
return cli.queueRequest(types.ToRequestQuery(req))
}
func (cli *socketClient) CommitAsync() *ReqRes {
return cli.queueRequest(types.ToRequestCommit())
}
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
return cli.queueRequest(types.ToRequestInitChain(req))
}
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
return cli.queueRequest(types.ToRequestBeginBlock(req))
}
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
return cli.queueRequest(types.ToRequestEndBlock(req))
}
//----------------------------------------
func (cli *socketClient) FlushSync() error {
reqRes := cli.queueRequest(types.ToRequestFlush())
if err := cli.Error(); err != nil {
return err
}
reqRes.Wait() // NOTE: if we don't flush the queue, it's possible to get stuck here
return cli.Error()
}
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
reqres := cli.queueRequest(types.ToRequestEcho(msg))
cli.FlushSync()
return reqres.Response.GetEcho(), cli.Error()
}
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
reqres := cli.queueRequest(types.ToRequestInfo(req))
cli.FlushSync()
return reqres.Response.GetInfo(), cli.Error()
}
func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
reqres := cli.queueRequest(types.ToRequestSetOption(req))
cli.FlushSync()
return reqres.Response.GetSetOption(), cli.Error()
}
func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
reqres := cli.queueRequest(types.ToRequestDeliverTx(tx))
cli.FlushSync()
return reqres.Response.GetDeliverTx(), cli.Error()
}
func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
reqres := cli.queueRequest(types.ToRequestCheckTx(tx))
cli.FlushSync()
return reqres.Response.GetCheckTx(), cli.Error()
}
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
reqres := cli.queueRequest(types.ToRequestQuery(req))
cli.FlushSync()
return reqres.Response.GetQuery(), cli.Error()
}
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
reqres := cli.queueRequest(types.ToRequestCommit())
cli.FlushSync()
return reqres.Response.GetCommit(), cli.Error()
}
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
reqres := cli.queueRequest(types.ToRequestInitChain(req))
cli.FlushSync()
return reqres.Response.GetInitChain(), cli.Error()
}
func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
cli.FlushSync()
return reqres.Response.GetBeginBlock(), cli.Error()
}
func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
reqres := cli.queueRequest(types.ToRequestEndBlock(req))
cli.FlushSync()
return reqres.Response.GetEndBlock(), cli.Error()
}
//----------------------------------------
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
reqres := NewReqRes(req)
// TODO: set cli.err if reqQueue times out
cli.reqQueue <- reqres
// Maybe auto-flush, or unset auto-flush
switch req.Value.(type) {
case *types.Request_Flush:
cli.flushTimer.Unset()
default:
cli.flushTimer.Set()
}
return reqres
}
func (cli *socketClient) flushQueue() {
// mark all in-flight messages as resolved (they will get cli.Error())
for req := cli.reqSent.Front(); req != nil; req = req.Next() {
reqres := req.Value.(*ReqRes)
reqres.Done()
}
// mark all queued messages as resolved
LOOP:
for {
select {
case reqres := <-cli.reqQueue:
reqres.Done()
default:
break LOOP
}
}
}
//----------------------------------------
func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
switch req.Value.(type) {
case *types.Request_Echo:
_, ok = res.Value.(*types.Response_Echo)
case *types.Request_Flush:
_, ok = res.Value.(*types.Response_Flush)
case *types.Request_Info:
_, ok = res.Value.(*types.Response_Info)
case *types.Request_SetOption:
_, ok = res.Value.(*types.Response_SetOption)
case *types.Request_DeliverTx:
_, ok = res.Value.(*types.Response_DeliverTx)
case *types.Request_CheckTx:
_, ok = res.Value.(*types.Response_CheckTx)
case *types.Request_Commit:
_, ok = res.Value.(*types.Response_Commit)
case *types.Request_Query:
_, ok = res.Value.(*types.Response_Query)
case *types.Request_InitChain:
_, ok = res.Value.(*types.Response_InitChain)
case *types.Request_BeginBlock:
_, ok = res.Value.(*types.Response_BeginBlock)
case *types.Request_EndBlock:
_, ok = res.Value.(*types.Response_EndBlock)
}
return ok
}


@@ -1,121 +0,0 @@
package abcicli_test
import (
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestSocketClientStopForErrorDeadlock(t *testing.T) {
c := abcicli.NewSocketClient(":80", false)
err := errors.New("foo-tendermint")
// See Issue https://github.com/tendermint/abci/issues/114
doneChan := make(chan bool)
go func() {
defer close(doneChan)
c.StopForError(err)
c.StopForError(err)
}()
select {
case <-doneChan:
case <-time.After(time.Second * 4):
t.Fatalf("Test took too long, potential deadlock still exists")
}
}
func TestProperSyncCalls(t *testing.T) {
app := slowApp{}
s, c := setupClientServer(t, app)
defer s.Stop()
defer c.Stop()
resp := make(chan error, 1)
go func() {
// This is BeginBlockSync unrolled....
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
c.FlushSync()
res := reqres.Response.GetBeginBlock()
require.NotNil(t, res)
resp <- c.Error()
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.NoError(t, err, "This should return success")
}
}
func TestHangingSyncCalls(t *testing.T) {
app := slowApp{}
s, c := setupClientServer(t, app)
defer s.Stop()
defer c.Stop()
resp := make(chan error, 1)
go func() {
// Start BeginBlock and flush it
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
flush := c.FlushAsync()
// wait 20 ms for all events to travel socket, but
// no response yet from server
time.Sleep(20 * time.Millisecond)
// kill the server, so the connections break
s.Stop()
// wait for the response from BeginBlock
reqres.Wait()
flush.Wait()
resp <- c.Error()
}()
select {
case <-time.After(time.Second):
require.Fail(t, "No response arrived")
case err, ok := <-resp:
require.True(t, ok, "Must not close channel")
assert.Error(t, err, "We should get EOF error")
}
}
func setupClientServer(t *testing.T, app types.Application) (
cmn.Service, abcicli.Client) {
// some port between 20k and 30k
port := 20000 + cmn.RandInt32()%10000
addr := fmt.Sprintf("localhost:%d", port)
s, err := server.NewServer(addr, "socket", app)
require.NoError(t, err)
err = s.Start()
require.NoError(t, err)
c := abcicli.NewSocketClient(addr, true)
err = c.Start()
require.NoError(t, err)
return s, c
}
type slowApp struct {
types.BaseApplication
}
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
time.Sleep(200 * time.Millisecond)
return types.ResponseBeginBlock{}
}


@@ -1,774 +0,0 @@
package main
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/spf13/cobra"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/abci/version"
"github.com/tendermint/tendermint/crypto/merkle"
)
// client is a global variable so it can be reused by the console
var (
client abcicli.Client
logger log.Logger
)
// flags
var (
// global
flagAddress string
flagAbci string
flagVerbose bool // for the println output
flagLogLevel string // for the logger
// query
flagPath string
flagHeight int
flagProve bool
// counter
flagSerial bool
// kvstore
flagPersist string
)
var RootCmd = &cobra.Command{
Use: "abci-cli",
Short: "the ABCI CLI tool wraps an ABCI client",
Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
switch cmd.Use {
case "counter", "kvstore", "dummy": // for the examples apps, don't pre-run
return nil
case "version": // skip running for version command
return nil
}
if logger == nil {
allowLevel, err := log.AllowLevel(flagLogLevel)
if err != nil {
return err
}
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
}
if client == nil {
var err error
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
if err != nil {
return err
}
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
return err
}
}
return nil
},
}
// Structure for data passed to print response.
type response struct {
// generic abci response
Data []byte
Code uint32
Info string
Log string
Query *queryResponse
}
type queryResponse struct {
Key []byte
Value []byte
Height int64
Proof *merkle.Proof
}
func Execute() error {
addGlobalFlags()
addCommands()
return RootCmd.Execute()
}
func addGlobalFlags() {
RootCmd.PersistentFlags().StringVarP(&flagAddress, "address", "", "tcp://0.0.0.0:26658", "address of application socket")
RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc")
RootCmd.PersistentFlags().BoolVarP(&flagVerbose, "verbose", "v", false, "print the command and results as if it were a console session")
RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level")
}
func addQueryFlags() {
queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with")
queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at")
queryCmd.PersistentFlags().BoolVarP(&flagProve, "prove", "", false, "whether or not to return a merkle proof of the query result")
}
func addCounterFlags() {
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}
func addDummyFlags() {
dummyCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
func addKVStoreFlags() {
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
func addCommands() {
RootCmd.AddCommand(batchCmd)
RootCmd.AddCommand(consoleCmd)
RootCmd.AddCommand(echoCmd)
RootCmd.AddCommand(infoCmd)
RootCmd.AddCommand(setOptionCmd)
RootCmd.AddCommand(deliverTxCmd)
RootCmd.AddCommand(checkTxCmd)
RootCmd.AddCommand(commitCmd)
RootCmd.AddCommand(versionCmd)
RootCmd.AddCommand(testCmd)
addQueryFlags()
RootCmd.AddCommand(queryCmd)
// examples
addCounterFlags()
RootCmd.AddCommand(counterCmd)
// deprecated, left for backwards compatibility
addDummyFlags()
RootCmd.AddCommand(dummyCmd)
// replaces dummy, see issue #196
addKVStoreFlags()
RootCmd.AddCommand(kvstoreCmd)
}
var batchCmd = &cobra.Command{
Use: "batch",
Short: "run a batch of abci commands against an application",
Long: `run a batch of abci commands against an application
This command is run by piping in a file containing a series of commands
you'd like to run:
abci-cli batch < example.file
where example.file looks something like:
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
info
`,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdBatch(cmd, args)
},
}
var consoleCmd = &cobra.Command{
Use: "console",
Short: "start an interactive ABCI console for multiple commands",
Long: `start an interactive ABCI console for multiple commands
This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
Args: cobra.ExactArgs(0),
ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
RunE: func(cmd *cobra.Command, args []string) error {
return cmdConsole(cmd, args)
},
}
var echoCmd = &cobra.Command{
Use: "echo",
Short: "have the application echo a message",
Long: "have the application echo a message",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdEcho(cmd, args)
},
}
var infoCmd = &cobra.Command{
Use: "info",
Short: "get some info about the application",
Long: "get some info about the application",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdInfo(cmd, args)
},
}
var setOptionCmd = &cobra.Command{
Use: "set_option",
Short: "set an option on the application",
Long: "set an option on the application",
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdSetOption(cmd, args)
},
}
var deliverTxCmd = &cobra.Command{
Use: "deliver_tx",
Short: "deliver a new transaction to the application",
Long: "deliver a new transaction to the application",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdDeliverTx(cmd, args)
},
}
var checkTxCmd = &cobra.Command{
Use: "check_tx",
Short: "validate a transaction",
Long: "validate a transaction",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdCheckTx(cmd, args)
},
}
var commitCmd = &cobra.Command{
Use: "commit",
Short: "commit the application state and return the Merkle root hash",
Long: "commit the application state and return the Merkle root hash",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdCommit(cmd, args)
},
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "print ABCI console version",
Long: "print ABCI console version",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println(version.Version)
return nil
},
}
var queryCmd = &cobra.Command{
Use: "query",
Short: "query the application state",
Long: "query the application state",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdQuery(cmd, args)
},
}
var counterCmd = &cobra.Command{
Use: "counter",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdCounter(cmd, args)
},
}
// deprecated, left for backwards compatibility
var dummyCmd = &cobra.Command{
Use: "dummy",
Deprecated: "use: [abci-cli kvstore] instead",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdKVStore(cmd, args)
},
}
var kvstoreCmd = &cobra.Command{
Use: "kvstore",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdKVStore(cmd, args)
},
}
var testCmd = &cobra.Command{
Use: "test",
Short: "run integration tests",
Long: "run integration tests",
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
return cmdTest(cmd, args)
},
}
// persistentArgs generates a new args array based on the previous call's args to maintain flag persistence
func persistentArgs(line []byte) []string {
// generate the arguments to run from original os.Args
// to maintain flag arguments
args := os.Args
args = args[:len(args)-1] // remove the previous command argument
if len(line) > 0 { // prevents introduction of extra space leading to argument parse errors
args = append(args, strings.Split(string(line), " ")...)
}
return args
}
//--------------------------------------------------------------------------------
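// compose runs the given functions in order and returns the first error
// encountered, or nil if all of them succeed.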
func compose(fs []func() error) error {
	if len(fs) == 0 {
		return nil
	}
	if err := fs[0](); err != nil {
		return err
	}
	return compose(fs[1:])
}
func cmdTest(cmd *cobra.Command, args []string) error {
return compose(
[]func() error{
func() error { return servertest.InitChain(client) },
func() error { return servertest.SetOption(client, "serial", "on") },
func() error { return servertest.Commit(client, nil) },
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
func() error { return servertest.Commit(client, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
func() error {
return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
},
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
})
}
func cmdBatch(cmd *cobra.Command, args []string) error {
bufReader := bufio.NewReader(os.Stdin)
for {
line, more, err := bufReader.ReadLine()
if more {
return errors.New("Input line is too long")
} else if err == io.EOF {
break
} else if len(line) == 0 {
continue
} else if err != nil {
return err
}
cmdArgs := persistentArgs(line)
if err := muxOnCommands(cmd, cmdArgs); err != nil {
return err
}
fmt.Println()
}
return nil
}
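// cmdConsole reads lines from stdin in a loop and dispatches each one as an abci-cli command.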
func cmdConsole(cmd *cobra.Command, args []string) error {
for {
fmt.Printf("> ")
bufReader := bufio.NewReader(os.Stdin)
line, more, err := bufReader.ReadLine()
if more {
return errors.New("Input is too long")
} else if err != nil {
return err
}
pArgs := persistentArgs(line)
if err := muxOnCommands(cmd, pArgs); err != nil {
return err
}
}
return nil
}
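// muxOnCommands strips flags from the persistent args, extracts the subcommand
// and its arguments, and dispatches to the corresponding cmd* handler.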
func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
if len(pArgs) < 2 {
return errors.New("expecting persistent args of the form: abci-cli [command] <...>")
}
// TODO: this parsing is fragile
args := []string{}
for i := 0; i < len(pArgs); i++ {
arg := pArgs[i]
// check for flags
if strings.HasPrefix(arg, "-") {
// if it contains an equals sign, we can just skip it
if strings.Contains(arg, "=") {
continue
}
// if it's a boolean flag, we can just skip it
_, err := cmd.Flags().GetBool(strings.TrimLeft(arg, "-"))
if err == nil {
continue
}
// otherwise, we need to skip the next one too
i++
continue
}
// append the actual arg
args = append(args, arg)
}
var subCommand string
var actualArgs []string
if len(args) > 1 {
subCommand = args[1]
}
if len(args) > 2 {
actualArgs = args[2:]
}
cmd.Use = subCommand // for later print statements ...
switch strings.ToLower(subCommand) {
case "check_tx":
return cmdCheckTx(cmd, actualArgs)
case "commit":
return cmdCommit(cmd, actualArgs)
case "deliver_tx":
return cmdDeliverTx(cmd, actualArgs)
case "echo":
return cmdEcho(cmd, actualArgs)
case "info":
return cmdInfo(cmd, actualArgs)
case "query":
return cmdQuery(cmd, actualArgs)
case "set_option":
return cmdSetOption(cmd, actualArgs)
default:
return cmdUnimplemented(cmd, pArgs)
}
}
func cmdUnimplemented(cmd *cobra.Command, args []string) error {
msg := "unimplemented command"
if len(args) > 0 {
msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
}
printResponse(cmd, args, response{
Code: codeBad,
Log: msg,
})
fmt.Println("Available commands:")
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
fmt.Println("Use \"[command] --help\" for more information about a command.")
return nil
}
// Have the application echo a message
func cmdEcho(cmd *cobra.Command, args []string) error {
msg := ""
if len(args) > 0 {
msg = args[0]
}
res, err := client.EchoSync(msg)
if err != nil {
return err
}
printResponse(cmd, args, response{
Data: []byte(res.Message),
})
return nil
}
// Get some info from the application
func cmdInfo(cmd *cobra.Command, args []string) error {
var version string
if len(args) == 1 {
version = args[0]
}
res, err := client.InfoSync(types.RequestInfo{Version: version})
if err != nil {
return err
}
printResponse(cmd, args, response{
Data: []byte(res.Data),
})
return nil
}
const codeBad uint32 = 10
// Set an option on the application
func cmdSetOption(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want at least arguments of the form: <key> <value>",
})
return nil
}
key, val := args[0], args[1]
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
if err != nil {
return err
}
printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
return nil
}
// Append a new tx to the application
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Log: "want the tx",
})
return nil
}
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
res, err := client.DeliverTxSync(txBytes)
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
return nil
}
// Validate a tx
func cmdCheckTx(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Info: "want the tx",
})
return nil
}
txBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
res, err := client.CheckTxSync(txBytes)
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: res.Code,
Data: res.Data,
Info: res.Info,
Log: res.Log,
})
return nil
}
// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
res, err := client.CommitSync()
if err != nil {
return err
}
printResponse(cmd, args, response{
Data: res.Data,
})
return nil
}
// Query application state
func cmdQuery(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
printResponse(cmd, args, response{
Code: codeBad,
Info: "want the query",
Log: "",
})
return nil
}
queryBytes, err := stringOrHexToBytes(args[0])
if err != nil {
return err
}
resQuery, err := client.QuerySync(types.RequestQuery{
Data: queryBytes,
Path: flagPath,
Height: int64(flagHeight),
Prove: flagProve,
})
if err != nil {
return err
}
printResponse(cmd, args, response{
Code: resQuery.Code,
Info: resQuery.Info,
Log: resQuery.Log,
Query: &queryResponse{
Key: resQuery.Key,
Value: resQuery.Value,
Height: resQuery.Height,
Proof: resQuery.Proof,
},
})
return nil
}
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewCounterApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})
return nil
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Create the application - in memory or persisted to disk
var app types.Application
if flagPersist == "" {
app = kvstore.NewKVStoreApplication()
} else {
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
}
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})
return nil
}
//--------------------------------------------------------------------------------
func printResponse(cmd *cobra.Command, args []string, rsp response) {
if flagVerbose {
fmt.Println(">", cmd.Use, strings.Join(args, " "))
}
// Always print the status code.
if rsp.Code == types.CodeTypeOK {
fmt.Printf("-> code: OK\n")
} else {
fmt.Printf("-> code: %d\n", rsp.Code)
}
if len(rsp.Data) != 0 {
// Do not print this line when using the commit command
// because the string comes out as gibberish
if cmd.Use != "commit" {
fmt.Printf("-> data: %s\n", rsp.Data)
}
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
}
if rsp.Log != "" {
fmt.Printf("-> log: %s\n", rsp.Log)
}
if rsp.Query != nil {
fmt.Printf("-> height: %d\n", rsp.Query.Height)
if rsp.Query.Key != nil {
fmt.Printf("-> key: %s\n", rsp.Query.Key)
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
}
if rsp.Query.Value != nil {
fmt.Printf("-> value: %s\n", rsp.Query.Value)
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
}
if rsp.Query.Proof != nil {
fmt.Printf("-> proof: %#v\n", rsp.Query.Proof)
}
}
}
// NOTE: s is interpreted as a string unless prefixed with 0x
func stringOrHexToBytes(s string) ([]byte, error) {
if len(s) > 2 && strings.ToLower(s[:2]) == "0x" {
b, err := hex.DecodeString(s[2:])
if err != nil {
err = fmt.Errorf("Error decoding hex argument: %s", err.Error())
return nil, err
}
return b, nil
}
if !strings.HasPrefix(s, "\"") || !strings.HasSuffix(s, "\"") {
err := fmt.Errorf("Invalid string arg: \"%s\". Must be quoted or a \"0x\"-prefixed hex string", s)
return nil, err
}
return []byte(s[1 : len(s)-1]), nil
}


@@ -1,14 +0,0 @@
package main
import (
"fmt"
"os"
)
func main() {
err := Execute()
if err != nil {
fmt.Print(err)
os.Exit(1)
}
}


@@ -1,10 +0,0 @@
package code
// Return codes for the examples
const (
CodeTypeOK uint32 = 0
CodeTypeEncodingError uint32 = 1
CodeTypeBadNonce uint32 = 2
CodeTypeUnauthorized uint32 = 3
CodeTypeUnknownError uint32 = 4
)


@@ -1,103 +0,0 @@
package counter
import (
"encoding/binary"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
type CounterApplication struct {
types.BaseApplication
hashCount int
txCount int
serial bool
}
func NewCounterApplication(serial bool) *CounterApplication {
return &CounterApplication{serial: serial}
}
func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
key, value := req.Key, req.Value
if key == "serial" && value == "on" {
app.serial = true
} else {
/*
TODO Panic and have the ABCI server pass an exception.
The client can call SetOptionSync() and get an `error`.
return types.ResponseSetOption{
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
}
*/
return types.ResponseSetOption{}
}
return types.ResponseSetOption{}
}
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
if app.serial {
if len(tx) > 8 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
}
app.txCount++
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
if app.serial {
if len(tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
}
}
return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
app.hashCount++
if app.txCount == 0 {
return types.ResponseCommit{}
}
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return types.ResponseCommit{Data: hash}
}
func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}


@@ -1,3 +0,0 @@
package example
// so the go tool doesn't return errors about no buildable go files ...


@@ -1,156 +0,0 @@
package example
import (
"fmt"
"net"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"golang.org/x/net/context"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
)
func TestKVStore(t *testing.T) {
fmt.Println("### Testing KVStore")
testStream(t, kvstore.NewKVStoreApplication())
}
func TestBaseApp(t *testing.T) {
fmt.Println("### Testing BaseApp")
testStream(t, types.NewBaseApplication())
}
func TestGRPC(t *testing.T) {
fmt.Println("### Testing GRPC")
testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
}
func testStream(t *testing.T, app types.Application) {
numDeliverTxs := 20000
// Start the listener
server := abciserver.NewSocketServer("unix://test.sock", app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
require.NoError(t, err, "Error starting socket server")
}
defer server.Stop()
// Connect to the socket
client := abcicli.NewSocketClient("unix://test.sock", false)
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
if err := client.Start(); err != nil {
t.Fatalf("Error starting socket client: %v", err.Error())
}
defer client.Stop()
done := make(chan struct{})
counter := 0
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
// Process response
switch r := res.Value.(type) {
case *types.Response_DeliverTx:
counter++
if r.DeliverTx.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
}
if counter > numDeliverTxs {
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
}
case *types.Response_Flush:
// ignore
default:
t.Error("Unexpected response type", reflect.TypeOf(res.Value))
}
})
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
reqRes := client.DeliverTxAsync([]byte("test"))
_ = reqRes
// check err ?
// Sometimes send flush messages
if counter%123 == 0 {
client.FlushAsync()
// check err ?
}
}
// Send final flush message
client.FlushAsync()
<-done
}
//-------------------------
// test grpc
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
return cmn.Connect(addr)
}
func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
numDeliverTxs := 2000
// Start the listener
server := abciserver.NewGRPCServer("unix://test.sock", app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
t.Fatalf("Error starting GRPC server: %v", err.Error())
}
defer server.Stop()
// Connect to the socket
conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
if err != nil {
t.Fatalf("Error dialing GRPC server: %v", err.Error())
}
defer conn.Close()
client := types.NewABCIApplicationClient(conn)
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
counter++
if response.Code != code.CodeTypeOK {
t.Error("DeliverTx failed with ret_code", response.Code)
}
if counter > numDeliverTxs {
t.Fatal("Too many DeliverTx responses")
}
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}
}
}


@@ -1 +0,0 @@
node_modules


@@ -1 +0,0 @@
This example has been moved here: https://github.com/tendermint/js-abci/tree/master/example


@@ -1,31 +0,0 @@
# KVStore
There are two apps here: the KVStoreApplication and the PersistentKVStoreApplication.
## KVStoreApplication
The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).
## PersistentKVStoreApplication
The PersistentKVStoreApplication wraps the KVStoreApplication
and provides two additional features:
1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
2) validator set changes
The state is persisted in leveldb along with the last block committed,
and the Handshake allows any necessary blocks to be replayed.
Validator set changes are effected using the following transaction format:
```
val:pubkey1/power1,pubkey2/power2,pubkey3/power3
```
where `power1` is the new voting power for the validator with `pubkey1` (possibly a new one).
There is no sybil protection against new validators joining.
Validators can be removed by setting their power to `0`.
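For concreteness, here is a minimal Go sketch of how a single validator-change transaction can be built with this package's `MakeValSetChangeTx` helper and delivered to a `PersistentKVStoreApplication`. The database directory and the power value are arbitrary placeholders, not part of the app itself.
```
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
)

func main() {
	// placeholder directory; any writable path works
	app := kvstore.NewPersistentKVStoreApplication("/tmp/kvstore-example")

	// build a "val:pubkey/power" tx for a random 32-byte ed25519 pubkey
	update := types.Ed25519ValidatorUpdate(cmn.RandBytes(32), 10)
	tx := kvstore.MakeValSetChangeTx(update.PubKey, update.Power)

	// the update is collected during DeliverTx and returned from EndBlock
	app.BeginBlock(types.RequestBeginBlock{})
	res := app.DeliverTx(tx)
	fmt.Println("deliver code:", res.Code)
	end := app.EndBlock(types.RequestEndBlock{})
	fmt.Println("validator updates:", len(end.ValidatorUpdates))
}
```
The same `val:...` string (quoted) can also be submitted with `abci-cli deliver_tx` against a running `abci-cli kvstore` server.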


@@ -1,36 +0,0 @@
package kvstore
import (
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
// RandVal creates one random validator with a random key and
// random power (the index argument is currently unused)
func RandVal(i int) types.ValidatorUpdate {
pubkey := cmn.RandBytes(32)
power := cmn.RandUint16() + 1
v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
return v
}
// RandVals returns a list of cnt validators for initializing
// the application. Note that both the keys and the powers are
// random (change this if deterministic values are desired)
func RandVals(cnt int) []types.ValidatorUpdate {
res := make([]types.ValidatorUpdate, cnt)
for i := 0; i < cnt; i++ {
res[i] = RandVal(i)
}
return res
}
// InitKVStore initializes the kvstore app with some data,
// which allows tests to pass and is fine as long as you
// don't make any txs that modify the validator state
func InitKVStore(app *PersistentKVStoreApplication) {
app.InitChain(types.RequestInitChain{
Validators: RandVals(1),
})
}


@@ -1,134 +0,0 @@
package kvstore
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
)
var (
stateKey = []byte("stateKey")
kvPairPrefixKey = []byte("kvPairKey:")
ProtocolVersion version.Protocol = 0x1
)
type State struct {
db dbm.DB
Size int64 `json:"size"`
Height int64 `json:"height"`
AppHash []byte `json:"app_hash"`
}
func loadState(db dbm.DB) State {
stateBytes := db.Get(stateKey)
var state State
if len(stateBytes) != 0 {
err := json.Unmarshal(stateBytes, &state)
if err != nil {
panic(err)
}
}
state.db = db
return state
}
func saveState(state State) {
stateBytes, err := json.Marshal(state)
if err != nil {
panic(err)
}
state.db.Set(stateKey, stateBytes)
}
func prefixKey(key []byte) []byte {
return append(kvPairPrefixKey, key...)
}
//---------------------------------------------------
var _ types.Application = (*KVStoreApplication)(nil)
type KVStoreApplication struct {
types.BaseApplication
state State
}
func NewKVStoreApplication() *KVStoreApplication {
state := loadState(dbm.NewMemDB())
return &KVStoreApplication{state: state}
}
func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
AppVersion: ProtocolVersion.Uint64(),
}
}
// tx is either "key=value" or just arbitrary bytes
func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
var key, value []byte
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
key, value = parts[0], parts[1]
} else {
key, value = tx, tx
}
app.state.db.Set(prefixKey(key), value)
app.state.Size += 1
tags := []cmn.KVPair{
{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
{Key: []byte("app.key"), Value: key},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
}
func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
func (app *KVStoreApplication) Commit() types.ResponseCommit {
// Using a memdb - just return the varint-encoded size of the db as the app hash
appHash := make([]byte, 8)
binary.PutVarint(appHash, app.state.Size)
app.state.AppHash = appHash
app.state.Height += 1
saveState(app.state)
return types.ResponseCommit{Data: appHash}
}
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
	value := app.state.db.Get(prefixKey(reqQuery.Data))
	resQuery.Key = reqQuery.Data
	resQuery.Value = value
	if reqQuery.Prove {
		resQuery.Index = -1 // TODO make Proof return index
	}
	if value != nil {
		resQuery.Log = "exists"
	} else {
		resQuery.Log = "does not exist"
	}
	return
}


@@ -1,311 +0,0 @@
package kvstore
import (
"bytes"
"fmt"
"io/ioutil"
"sort"
"testing"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
abciserver "github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
)
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
ar := app.DeliverTx(tx)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar = app.DeliverTx(tx)
require.False(t, ar.IsErr(), ar)
// make sure query is fine
resQuery := app.Query(types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
// make sure proof is fine
resQuery = app.Query(types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,
})
require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
}
func TestKVStoreKV(t *testing.T) {
kvstore := NewKVStoreApplication()
key := "abc"
value := key
tx := []byte(key)
testKVStore(t, kvstore, tx, key, value)
value = "def"
tx = []byte(key + "=" + value)
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreKV(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
key := "abc"
value := key
tx := []byte(key)
testKVStore(t, kvstore, tx, key, value)
value = "def"
tx = []byte(key + "=" + value)
testKVStore(t, kvstore, tx, key, value)
}
func TestPersistentKVStoreInfo(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
InitKVStore(kvstore)
height := int64(0)
resInfo := kvstore.Info(types.RequestInfo{})
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
// make and apply block
height = int64(1)
hash := []byte("foo")
header := types.Header{
Height: int64(height),
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
if resInfo.LastBlockHeight != height {
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
}
}
// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
if err != nil {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
// init with some validators
total := 10
nInit := 5
vals := RandVals(total)
// initialize with the first nInit
kvstore.InitChain(types.RequestInitChain{
Validators: vals[:nInit],
})
vals1, vals2 := vals[:nInit], kvstore.Validators()
valsEqual(t, vals1, vals2)
var v1, v2, v3 types.ValidatorUpdate
// add some validators
v1, v2 = vals[nInit], vals[nInit+1]
diff := []types.ValidatorUpdate{v1, v2}
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
vals1, vals2 = vals[:nInit+2], kvstore.Validators()
valsEqual(t, vals1, vals2)
// remove some validators
v1, v2, v3 = vals[nInit-2], vals[nInit-1], vals[nInit]
v1.Power = 0
v2.Power = 0
v3.Power = 0
diff = []types.ValidatorUpdate{v1, v2, v3}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
vals1 = append(vals[:nInit-2], vals[nInit+1])
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
// update some validators
v1 = vals[0]
if v1.Power == 5 {
v1.Power = 6
} else {
v1.Power = 5
}
diff = []types.ValidatorUpdate{v1}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
makeApplyBlock(t, kvstore, 3, diff, tx1)
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
}
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
header := types.Header{
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(tx); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
}
// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
if len(vals1) != len(vals2) {
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
}
sort.Sort(types.ValidatorUpdates(vals1))
sort.Sort(types.ValidatorUpdates(vals2))
for i, v1 := range vals1 {
v2 := vals2[i]
if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) ||
v1.Power != v2.Power {
t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, v2.PubKey, v2.Power, v1.PubKey, v1.Power)
}
}
}
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
server := abciserver.NewSocketServer(socket, app)
server.SetLogger(logger.With("module", "abci-server"))
if err := server.Start(); err != nil {
return nil, nil, err
}
// Connect to the socket
client := abcicli.NewSocketClient(socket, false)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
server.Stop()
return nil, nil, err
}
return client, server, nil
}
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
gapp := types.NewGRPCApplication(app)
server := abciserver.NewGRPCServer(socket, gapp)
server.SetLogger(logger.With("module", "abci-server"))
if err := server.Start(); err != nil {
return nil, nil, err
}
client := abcicli.NewGRPCClient(socket, true)
client.SetLogger(logger.With("module", "abci-client"))
if err := client.Start(); err != nil {
server.Stop()
return nil, nil, err
}
return client, server, nil
}
func TestClientServer(t *testing.T) {
// set up socket app
kvstore := NewKVStoreApplication()
client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
require.Nil(t, err)
defer server.Stop()
defer client.Stop()
runClientTests(t, client)
// set up grpc app
kvstore = NewKVStoreApplication()
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
require.Nil(t, err)
defer gserver.Stop()
defer gclient.Stop()
runClientTests(t, gclient)
}
func runClientTests(t *testing.T, client abcicli.Client) {
// run some tests....
key := "abc"
value := key
tx := []byte(key)
testClient(t, client, tx, key, value)
value = "def"
tx = []byte(key + "=" + value)
testClient(t, client, tx, key, value)
}
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(tx)
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTxSync(tx)
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// make sure query is fine
resQuery, err := app.QuerySync(types.RequestQuery{
Path: "/store",
Data: []byte(key),
})
require.Nil(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
// make sure proof is fine
resQuery, err = app.QuerySync(types.RequestQuery{
Path: "/store",
Data: []byte(key),
Prove: true,
})
require.Nil(t, err)
require.Equal(t, code.CodeTypeOK, resQuery.Code)
require.Equal(t, value, string(resQuery.Value))
}


@@ -1,199 +0,0 @@
package kvstore
import (
"bytes"
"encoding/hex"
"fmt"
"strconv"
"strings"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
)
const (
ValidatorSetChangePrefix string = "val:"
)
//-----------------------------------------
var _ types.Application = (*PersistentKVStoreApplication)(nil)
type PersistentKVStoreApplication struct {
app *KVStoreApplication
// validator set
ValUpdates []types.ValidatorUpdate
logger log.Logger
}
func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
name := "kvstore"
db, err := dbm.NewGoLevelDB(name, dbDir)
if err != nil {
panic(err)
}
state := loadState(db)
return &PersistentKVStoreApplication{
app: &KVStoreApplication{state: state},
logger: log.NewNopLogger(),
}
}
func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
app.logger = l
}
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
res := app.app.Info(req)
res.LastBlockHeight = app.app.state.Height
res.LastBlockAppHash = app.app.state.AppHash
return res
}
func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
return app.app.SetOption(req)
}
// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey/power"
if isValidatorTx(tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(tx)
}
// otherwise, update the key-value store
return app.app.DeliverTx(tx)
}
func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return app.app.CheckTx(tx)
}
// Commit will panic if InitChain was not called
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
return app.app.Commit()
}
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
return app.app.Query(reqQuery)
}
// Save the validators in the merkle tree
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
for _, v := range req.Validators {
r := app.updateValidator(v)
if r.IsErr() {
app.logger.Error("Error updating validators", "r", r)
}
}
return types.ResponseInitChain{}
}
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)
return types.ResponseBeginBlock{}
}
// Update the validator set
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
}
//---------------------------------------------
// update validators
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
itr := app.app.state.db.Iterator(nil, nil)
defer itr.Close()
for ; itr.Valid(); itr.Next() {
if isValidatorTx(itr.Key()) {
validator := new(types.ValidatorUpdate)
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
if err != nil {
panic(err)
}
validators = append(validators, *validator)
}
}
return
}
func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
}
func isValidatorTx(tx []byte) bool {
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}
// format is "val:pubkey/power"
// pubkey is raw 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
tx = tx[len(ValidatorSetChangePrefix):]
// get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "/")
if len(pubKeyAndPower) != 2 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey/power'. Got %v", pubKeyAndPower)}
}
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
// decode the pubkey
pubkey, err := hex.DecodeString(pubkeyS)
if err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)}
}
// decode the power
power, err := strconv.ParseInt(powerS, 10, 64)
if err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
}
// update
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power))
}
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
key := []byte("val:" + string(v.PubKey.Data))
if v.Power == 0 {
// remove validator
if !app.app.state.db.Has(key) {
return types.ResponseDeliverTx{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)}
}
app.app.state.db.Delete(key)
} else {
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
if err := types.WriteMessage(&v, value); err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Error encoding validator: %v", err)}
}
app.app.state.db.Set(key, value.Bytes())
}
// we only update the changes array if we successfully updated the tree
app.ValUpdates = append(app.ValUpdates, v)
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}


@@ -1,50 +0,0 @@
from wire import decode_string
# map type_byte to message name
message_types = {
0x01: "echo",
0x02: "flush",
0x03: "info",
0x04: "set_option",
0x21: "deliver_tx",
0x22: "check_tx",
0x23: "commit",
0x24: "add_listener",
0x25: "rm_listener",
}
# return the decoded arguments of abci messages
class RequestDecoder():
def __init__(self, reader):
self.reader = reader
def echo(self):
return decode_string(self.reader)
def flush(self):
return
def info(self):
return
def set_option(self):
return decode_string(self.reader), decode_string(self.reader)
def deliver_tx(self):
return decode_string(self.reader)
def check_tx(self):
return decode_string(self.reader)
def commit(self):
return
def add_listener(self):
# TODO
return
def rm_listener(self):
# TODO
return


@@ -1,56 +0,0 @@
# Simple read() method around a bytearray
class BytesBuffer():
def __init__(self, b):
self.buf = b
self.readCount = 0
def count(self):
return self.readCount
def reset_count(self):
self.readCount = 0
def size(self):
return len(self.buf)
def peek(self):
return self.buf[0]
def write(self, b):
# b should be castable to byte array
self.buf += bytearray(b)
def read(self, n):
if len(self.buf) < n:
print "reader err: buf less than n"
# TODO: exception
return
self.readCount += n
r = self.buf[:n]
self.buf = self.buf[n:]
return r
# Buffer bytes off a tcp connection and read them off in chunks
class ConnReader():
def __init__(self, conn):
self.conn = conn
self.buf = bytearray()
# blocking
def read(self, n):
while n > len(self.buf):
moreBuf = self.conn.recv(1024)
if not moreBuf:
raise IOError("dead connection")
self.buf = self.buf + bytearray(moreBuf)
r = self.buf[:n]
self.buf = self.buf[n:]
return r


@@ -1,202 +0,0 @@
import socket
import select
import sys
from wire import decode_varint, encode
from reader import BytesBuffer
from msg import RequestDecoder, message_types
# hold the asynchronous state of a connection
# i.e. we may not get enough bytes on one read to decode the message
class Connection():
def __init__(self, fd, app):
self.fd = fd
self.app = app
self.recBuf = BytesBuffer(bytearray())
self.resBuf = BytesBuffer(bytearray())
self.msgLength = 0
self.decoder = RequestDecoder(self.recBuf)
self.inProgress = False # are we in the middle of a message
def recv(this):
data = this.fd.recv(1024)
if not data: # what about len(data) == 0
raise IOError("dead connection")
this.recBuf.write(data)
# ABCI server responds to messages by calling methods on the app
class ABCIServer():
def __init__(self, app, port=5410):
self.app = app
# map conn file descriptors to (app, reqBuf, resBuf, msgDecoder)
self.appMap = {}
self.port = port
self.listen_backlog = 10
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.setblocking(0)
self.listener.bind(('', port))
self.listener.listen(self.listen_backlog)
self.shutdown = False
self.read_list = [self.listener]
self.write_list = []
def handle_new_connection(self, r):
new_fd, new_addr = r.accept()
new_fd.setblocking(0) # non-blocking
self.read_list.append(new_fd)
self.write_list.append(new_fd)
print 'new connection to', new_addr
self.appMap[new_fd] = Connection(new_fd, self.app)
def handle_conn_closed(self, r):
self.read_list.remove(r)
self.write_list.remove(r)
r.close()
print "connection closed"
def handle_recv(self, r):
# app, recBuf, resBuf, conn
conn = self.appMap[r]
while True:
try:
print "recv loop"
# check if we need more data first
if conn.inProgress:
if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength):
conn.recv()
else:
if conn.recBuf.size() == 0:
conn.recv()
conn.inProgress = True
# see if we have enough to get the message length
if conn.msgLength == 0:
ll = conn.recBuf.peek()
if conn.recBuf.size() < 1 + ll:
# we don't have enough bytes to read the length yet
return
print "decoding msg length"
conn.msgLength = decode_varint(conn.recBuf)
# see if we have enough to decode the message
if conn.recBuf.size() < conn.msgLength:
return
# now we can decode the message
# first read the request type and get the particular msg
# decoder
typeByte = conn.recBuf.read(1)
typeByte = int(typeByte[0])
resTypeByte = typeByte + 0x10
req_type = message_types[typeByte]
if req_type == "flush":
# messages are length prefixed
conn.resBuf.write(encode(1))
conn.resBuf.write([resTypeByte])
conn.fd.send(str(conn.resBuf.buf))
conn.msgLength = 0
conn.inProgress = False
conn.resBuf = BytesBuffer(bytearray())
return
decoder = getattr(conn.decoder, req_type)
print "decoding args"
req_args = decoder()
print "got args", req_args
# done decoding message
conn.msgLength = 0
conn.inProgress = False
req_f = getattr(conn.app, req_type)
if req_args is None:
res = req_f()
elif isinstance(req_args, tuple):
res = req_f(*req_args)
else:
res = req_f(req_args)
if isinstance(res, tuple):
res, ret_code = res
else:
ret_code = res
res = None
print "called", req_type, "ret code:", ret_code
if ret_code != 0:
print "non-zero retcode:", ret_code
if req_type in ("echo", "info"): # these dont return a ret code
enc = encode(res)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(enc)
else:
enc, encRet = encode(res), encode(ret_code)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + len(encRet) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(encRet)
conn.resBuf.write(enc)
except TypeError as e:
print "TypeError on reading from connection:", e
self.handle_conn_closed(r)
return
except ValueError as e:
print "ValueError on reading from connection:", e
self.handle_conn_closed(r)
return
except IOError as e:
print "IOError on reading from connection:", e
self.handle_conn_closed(r)
return
except Exception as e:
# sys.exc_info()[0] # TODO better
print "error reading from connection", str(e)
self.handle_conn_closed(r)
return
def main_loop(self):
while not self.shutdown:
r_list, w_list, _ = select.select(
self.read_list, self.write_list, [], 2.5)
for r in r_list:
if (r == self.listener):
try:
self.handle_new_connection(r)
# undo adding to read list ...
except NameError as e:
print "Could not connect due to NameError:", e
except TypeError as e:
print "Could not connect due to TypeError:", e
except:
print "Could not connect due to unexpected error:", sys.exc_info()[0]
else:
self.handle_recv(r)
def handle_shutdown(self):
for r in self.read_list:
r.close()
for w in self.write_list:
try:
w.close()
except Exception as e:
print(e) # TODO: add logging
self.shutdown = True


@@ -1,115 +0,0 @@
# the decoder works off a reader
# the encoder returns bytearray
def hex2bytes(h):
return bytearray(h.decode('hex'))
def bytes2hex(b):
if type(b) in (str, unicode):
return "".join([hex(ord(c))[2:].zfill(2) for c in b])
else:
return bytes2hex(b.decode())
# expects uvarint64 (no crazy big nums!)
def uvarint_size(i):
if i == 0:
return 0
for j in xrange(1, 8):
if i < 1 << j * 8:
return j
return 8
# expects i < 2**size
def encode_big_endian(i, size):
if size == 0:
return bytearray()
return encode_big_endian(i / 256, size - 1) + bytearray([i % 256])
def decode_big_endian(reader, size):
if size == 0:
return 0
firstByte = reader.read(1)[0]
return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1)
# ints are max 16 bytes long
def encode_varint(i):
negate = False
if i < 0:
negate = True
i = -i
size = uvarint_size(i)
if size == 0:
return bytearray([0])
big_end = encode_big_endian(i, size)
if negate:
size += 0xF0
return bytearray([size]) + big_end
# reads a varint off the reader and returns the int
def decode_varint(reader):
size = reader.read(1)[0]
if size == 0:
return 0
negate = True if size > int(0xF0) else False
if negate:
size = size - 0xF0
i = decode_big_endian(reader, size)
if negate:
i = i * (-1)
return i
def encode_string(s):
size = encode_varint(len(s))
return size + bytearray(s)
def decode_string(reader):
length = decode_varint(reader)
return str(reader.read(length))
def encode_list(s):
b = bytearray()
map(b.extend, map(encode, s))
return encode_varint(len(s)) + b
def encode(s):
if s is None:
return bytearray()
if isinstance(s, int):
return encode_varint(s)
elif isinstance(s, str):
return encode_string(s)
elif isinstance(s, list):
return encode_list(s)
else:
print "UNSUPPORTED TYPE!", type(s), s
if __name__ == '__main__':
ns = [100, 100, 1000, 256]
ss = [2, 5, 5, 2]
bs = map(encode_big_endian, ns, ss)
ds = map(decode_big_endian, bs, ss)
print ns
print [i[0] for i in ds]
ss = ["abc", "hi there jim", "ok now what"]
e = map(encode_string, ss)
d = map(decode_string, e)
print ss
print [i[0] for i in d]


@@ -1,82 +0,0 @@
import sys
from abci.wire import hex2bytes, decode_big_endian, encode_big_endian
from abci.server import ABCIServer
from abci.reader import BytesBuffer
class CounterApplication():
def __init__(self):
sys.exit("The python example is out of date. Upgrading the Python examples is currently left as an exercise to you.")
self.hashCount = 0
self.txCount = 0
self.serial = False
def echo(self, msg):
return msg, 0
def info(self):
return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0
def set_option(self, key, value):
if key == "serial" and value == "on":
self.serial = True
return 0
def deliver_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue != self.txCount:
return None, 6
self.txCount += 1
return None, 0
def check_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue < self.txCount:
return 6
return 0
def commit(self):
self.hashCount += 1
if self.txCount == 0:
return "", 0
h = encode_big_endian(self.txCount, 8)
h.reverse()
return str(h), 0
def add_listener(self):
return 0
def rm_listener(self):
return 0
def event(self):
return
if __name__ == '__main__':
l = len(sys.argv)
if l == 1:
port = 26658
elif l == 2:
port = int(sys.argv[1])
else:
print "too many arguments"
quit()
print 'ABCI Demo APP (Python)'
app = CounterApplication()
server = ABCIServer(app, port)
server.main_loop()


@@ -1,50 +0,0 @@
from .wire import decode_string
# map type_byte to message name
message_types = {
0x01: "echo",
0x02: "flush",
0x03: "info",
0x04: "set_option",
0x21: "deliver_tx",
0x22: "check_tx",
0x23: "commit",
0x24: "add_listener",
0x25: "rm_listener",
}
# return the decoded arguments of abci messages
class RequestDecoder():
def __init__(self, reader):
self.reader = reader
def echo(self):
return decode_string(self.reader)
def flush(self):
return
def info(self):
return
def set_option(self):
return decode_string(self.reader), decode_string(self.reader)
def deliver_tx(self):
return decode_string(self.reader)
def check_tx(self):
return decode_string(self.reader)
def commit(self):
return
def add_listener(self):
# TODO
return
def rm_listener(self):
# TODO
return


@@ -1,56 +0,0 @@
# Simple read() method around a bytearray
class BytesBuffer():
def __init__(self, b):
self.buf = b
self.readCount = 0
def count(self):
return self.readCount
def reset_count(self):
self.readCount = 0
def size(self):
return len(self.buf)
def peek(self):
return self.buf[0]
def write(self, b):
# b should be castable to byte array
self.buf += bytearray(b)
def read(self, n):
if len(self.buf) < n:
print("reader err: buf less than n")
# TODO: exception
return
self.readCount += n
r = self.buf[:n]
self.buf = self.buf[n:]
return r
# Buffer bytes off a tcp connection and read them off in chunks
class ConnReader():
def __init__(self, conn):
self.conn = conn
self.buf = bytearray()
# blocking
def read(self, n):
while n > len(self.buf):
moreBuf = self.conn.recv(1024)
if not moreBuf:
raise IOError("dead connection")
self.buf = self.buf + bytearray(moreBuf)
r = self.buf[:n]
self.buf = self.buf[n:]
return r


@@ -1,196 +0,0 @@
import socket
import select
import sys
import logging
from .wire import decode_varint, encode
from .reader import BytesBuffer
from .msg import RequestDecoder, message_types
# hold the asynchronous state of a connection
# i.e. we may not get enough bytes on one read to decode the message
logger = logging.getLogger(__name__)
class Connection():
def __init__(self, fd, app):
self.fd = fd
self.app = app
self.recBuf = BytesBuffer(bytearray())
self.resBuf = BytesBuffer(bytearray())
self.msgLength = 0
self.decoder = RequestDecoder(self.recBuf)
self.inProgress = False # are we in the middle of a message
def recv(this):
data = this.fd.recv(1024)
if not data: # what about len(data) == 0
raise IOError("dead connection")
this.recBuf.write(data)
# ABCI server responds to messages by calling methods on the app
class ABCIServer():
def __init__(self, app, port=5410):
self.app = app
# map conn file descriptors to (app, reqBuf, resBuf, msgDecoder)
self.appMap = {}
self.port = port
self.listen_backlog = 10
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.setblocking(0)
self.listener.bind(('', port))
self.listener.listen(self.listen_backlog)
self.shutdown = False
self.read_list = [self.listener]
self.write_list = []
def handle_new_connection(self, r):
new_fd, new_addr = r.accept()
new_fd.setblocking(0) # non-blocking
self.read_list.append(new_fd)
self.write_list.append(new_fd)
print('new connection to', new_addr)
self.appMap[new_fd] = Connection(new_fd, self.app)
def handle_conn_closed(self, r):
self.read_list.remove(r)
self.write_list.remove(r)
r.close()
print("connection closed")
def handle_recv(self, r):
# app, recBuf, resBuf, conn
conn = self.appMap[r]
while True:
try:
print("recv loop")
# check if we need more data first
if conn.inProgress:
if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength):
conn.recv()
else:
if conn.recBuf.size() == 0:
conn.recv()
conn.inProgress = True
# see if we have enough to get the message length
if conn.msgLength == 0:
ll = conn.recBuf.peek()
if conn.recBuf.size() < 1 + ll:
# we don't have enough bytes to read the length yet
return
print("decoding msg length")
conn.msgLength = decode_varint(conn.recBuf)
# see if we have enough to decode the message
if conn.recBuf.size() < conn.msgLength:
return
# now we can decode the message
# first read the request type and get the particular msg
# decoder
typeByte = conn.recBuf.read(1)
typeByte = int(typeByte[0])
resTypeByte = typeByte + 0x10
req_type = message_types[typeByte]
if req_type == "flush":
# messages are length prefixed
conn.resBuf.write(encode(1))
conn.resBuf.write([resTypeByte])
conn.fd.send(conn.resBuf.buf)
conn.msgLength = 0
conn.inProgress = False
conn.resBuf = BytesBuffer(bytearray())
return
decoder = getattr(conn.decoder, req_type)
print("decoding args")
req_args = decoder()
print("got args", req_args)
# done decoding message
conn.msgLength = 0
conn.inProgress = False
req_f = getattr(conn.app, req_type)
if req_args is None:
res = req_f()
elif isinstance(req_args, tuple):
res = req_f(*req_args)
else:
res = req_f(req_args)
if isinstance(res, tuple):
res, ret_code = res
else:
ret_code = res
res = None
print("called", req_type, "ret code:", ret_code, 'res:', res)
if ret_code != 0:
print("non-zero retcode:", ret_code)
if req_type in ("echo", "info"): # these dont return a ret code
enc = encode(res)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(enc)
else:
enc, encRet = encode(res), encode(ret_code)
# messages are length prefixed
conn.resBuf.write(encode(len(enc) + len(encRet) + 1))
conn.resBuf.write([resTypeByte])
conn.resBuf.write(encRet)
conn.resBuf.write(enc)
except IOError as e:
print("IOError on reading from connection:", e)
self.handle_conn_closed(r)
return
except Exception as e:
logger.exception("error reading from connection")
self.handle_conn_closed(r)
return
def main_loop(self):
while not self.shutdown:
r_list, w_list, _ = select.select(
self.read_list, self.write_list, [], 2.5)
for r in r_list:
if (r == self.listener):
try:
self.handle_new_connection(r)
# undo adding to read list ...
except NameError as e:
print("Could not connect due to NameError:", e)
except TypeError as e:
print("Could not connect due to TypeError:", e)
except:
print("Could not connect due to unexpected error:", sys.exc_info()[0])
else:
self.handle_recv(r)
def handle_shutdown(self):
for r in self.read_list:
r.close()
for w in self.write_list:
try:
w.close()
except Exception as e:
print(e) # TODO: add logging
self.shutdown = True

View File

@@ -1,119 +0,0 @@
# the decoder works off a reader
# the encoder returns bytearray
def hex2bytes(h):
return bytearray.fromhex(h)
def bytes2hex(b):
if isinstance(b, str):
return "".join([hex(ord(c))[2:].zfill(2) for c in b])
else:
return bytes2hex(b.decode())
# expects uvarint64 (no crazy big nums!)
def uvarint_size(i):
if i == 0:
return 0
for j in range(1, 8):
if i < 1 << j * 8:
return j
return 8
# expects i < 256**size (size is the number of bytes)
def encode_big_endian(i, size):
if size == 0:
return bytearray()
return encode_big_endian(i // 256, size - 1) + bytearray([i % 256])
def decode_big_endian(reader, size):
if size == 0:
return 0
firstByte = reader.read(1)[0]
return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1)
# signed varint: a size byte (0xF0 flag marks negatives) followed by up to 8 big-endian bytes
def encode_varint(i):
negate = False
if i < 0:
negate = True
i = -i
size = uvarint_size(i)
if size == 0:
return bytearray([0])
big_end = encode_big_endian(i, size)
if negate:
size += 0xF0
return bytearray([size]) + big_end
# reads a varint off the reader and returns the int
def decode_varint(reader):
size = reader.read(1)[0]
if size == 0:
return 0
negate = True if size > int(0xF0) else False
if negate:
size = size - 0xF0
i = decode_big_endian(reader, size)
if negate:
i = i * (-1)
return i
def encode_string(s):
size = encode_varint(len(s))
return size + bytearray(s, 'utf8')
def decode_string(reader):
length = decode_varint(reader)
raw_data = reader.read(length)
return raw_data.decode()
def encode_list(s):
b = bytearray()
list(map(b.extend, list(map(encode, s))))
return encode_varint(len(s)) + b
def encode(s):
print('encoding', repr(s))
if s is None:
return bytearray()
if isinstance(s, int):
return encode_varint(s)
elif isinstance(s, str):
return encode_string(s)
elif isinstance(s, list):
return encode_list(s)
elif isinstance(s, bytearray):
return encode_varint(len(s)) + s # raw bytes: length prefix followed by the bytes themselves
else:
print("UNSUPPORTED TYPE!", type(s), s)
if __name__ == '__main__':
# simple round-trip self-test; BytesBuffer supplies the reader interface
from abci.reader import BytesBuffer
ns = [100, 100, 1000, 256]
ss = [2, 5, 5, 2]
bs = list(map(encode_big_endian, ns, ss))
ds = [decode_big_endian(BytesBuffer(b), s) for b, s in zip(bs, ss)]
print(ns)
print(ds)
ss = ["abc", "hi there jim", "ok now what"]
e = list(map(encode_string, ss))
d = [decode_string(BytesBuffer(enc)) for enc in e]
print(ss)
print(d)

View File

@@ -1,82 +0,0 @@
import sys
from abci.wire import hex2bytes, decode_big_endian, encode_big_endian
from abci.server import ABCIServer
from abci.reader import BytesBuffer
class CounterApplication():
def __init__(self):
sys.exit("The python example is out of date. Upgrading the Python examples is currently left as an exercise to you.")
self.hashCount = 0
self.txCount = 0
self.serial = False
def echo(self, msg):
return msg, 0
def info(self):
return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0
def set_option(self, key, value):
if key == "serial" and value == "on":
self.serial = True
return 0
def deliver_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue != self.txCount:
return None, 6
self.txCount += 1
return None, 0
def check_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue < self.txCount:
return 6
return 0
def commit(self):
self.hashCount += 1
if self.txCount == 0:
return "", 0
h = encode_big_endian(self.txCount, 8)
h.reverse()
return h.decode(), 0
def add_listener(self):
return 0
def rm_listener(self):
return 0
def event(self):
return
if __name__ == '__main__':
l = len(sys.argv)
if l == 1:
port = 26658
elif l == 2:
port = int(sys.argv[1])
else:
print("too many arguments")
quit()
print('ABCI Demo APP (Python)')
app = CounterApplication()
server = ABCIServer(app, port)
server.main_loop()

View File

@@ -1,57 +0,0 @@
package server
import (
"net"
"google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
type GRPCServer struct {
cmn.BaseService
proto string
addr string
listener net.Listener
server *grpc.Server
app types.ABCIApplicationServer
}
// NewGRPCServer returns a new gRPC ABCI server
func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) cmn.Service {
proto, addr := cmn.ProtocolAndAddress(protoAddr)
s := &GRPCServer{
proto: proto,
addr: addr,
listener: nil,
app: app,
}
s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
return s
}
// OnStart starts the gRPC service
func (s *GRPCServer) OnStart() error {
if err := s.BaseService.OnStart(); err != nil {
return err
}
ln, err := net.Listen(s.proto, s.addr)
if err != nil {
return err
}
s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
s.listener = ln
s.server = grpc.NewServer()
types.RegisterABCIApplicationServer(s.server, s.app)
go s.server.Serve(s.listener)
return nil
}
// OnStop stops the gRPC server
func (s *GRPCServer) OnStop() {
s.BaseService.OnStop()
s.server.Stop()
}

View File

@@ -1,31 +0,0 @@
/*
Package server is used to start a new ABCI server.
It contains two server implementations:
* gRPC server
* socket server
*/
package server
import (
"fmt"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func NewServer(protoAddr, transport string, app types.Application) (cmn.Service, error) {
var s cmn.Service
var err error
switch transport {
case "socket":
s = NewSocketServer(protoAddr, app)
case "grpc":
s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
default:
err = fmt.Errorf("Unknown server type %s", transport)
}
return s, err
}
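Not part of the diff above: a minimal, hedged sketch of how NewServer might be wired up with the example kvstore application; the listen address and transport below are illustrative defaults, not values taken from this changeset.

package main

import (
	"log"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
)

func main() {
	app := kvstore.NewKVStoreApplication()
	srv, err := abciserver.NewServer("tcp://0.0.0.0:26658", "socket", app)
	if err != nil {
		log.Fatalf("creating ABCI server: %v", err)
	}
	if err := srv.Start(); err != nil {
		log.Fatalf("starting ABCI server: %v", err)
	}
	select {} // block forever; a real binary would trap signals and call srv.Stop()
}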

View File

@@ -1,226 +0,0 @@
package server
import (
"bufio"
"fmt"
"io"
"net"
"sync"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
// var maxNumberConnections = 2
type SocketServer struct {
cmn.BaseService
proto string
addr string
listener net.Listener
connsMtx sync.Mutex
conns map[int]net.Conn
nextConnID int
appMtx sync.Mutex
app types.Application
}
func NewSocketServer(protoAddr string, app types.Application) cmn.Service {
proto, addr := cmn.ProtocolAndAddress(protoAddr)
s := &SocketServer{
proto: proto,
addr: addr,
listener: nil,
app: app,
conns: make(map[int]net.Conn),
}
s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
return s
}
func (s *SocketServer) OnStart() error {
if err := s.BaseService.OnStart(); err != nil {
return err
}
ln, err := net.Listen(s.proto, s.addr)
if err != nil {
return err
}
s.listener = ln
go s.acceptConnectionsRoutine()
return nil
}
func (s *SocketServer) OnStop() {
s.BaseService.OnStop()
if err := s.listener.Close(); err != nil {
s.Logger.Error("Error closing listener", "err", err)
}
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
for id, conn := range s.conns {
delete(s.conns, id)
if err := conn.Close(); err != nil {
s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
}
}
}
func (s *SocketServer) addConn(conn net.Conn) int {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
connID := s.nextConnID
s.nextConnID++
s.conns[connID] = conn
return connID
}
// deletes conn even if close errs
func (s *SocketServer) rmConn(connID int) error {
s.connsMtx.Lock()
defer s.connsMtx.Unlock()
conn, ok := s.conns[connID]
if !ok {
return fmt.Errorf("Connection %d does not exist", connID)
}
delete(s.conns, connID)
return conn.Close()
}
func (s *SocketServer) acceptConnectionsRoutine() {
for {
// Accept a connection
s.Logger.Info("Waiting for new connection...")
conn, err := s.listener.Accept()
if err != nil {
if !s.IsRunning() {
return // Ignore error from listener closing.
}
s.Logger.Error("Failed to accept connection: " + err.Error())
continue
}
s.Logger.Info("Accepted a new connection")
connID := s.addConn(conn)
closeConn := make(chan error, 2) // Push to signal connection closed
responses := make(chan *types.Response, 1000) // A channel to buffer responses
// Read requests from conn and deal with them
go s.handleRequests(closeConn, conn, responses)
// Pull responses from 'responses' and write them to conn.
go s.handleResponses(closeConn, conn, responses)
// Wait until signal to close connection
go s.waitForClose(closeConn, connID)
}
}
func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
err := <-closeConn
if err == io.EOF {
s.Logger.Error("Connection was closed by client")
} else if err != nil {
s.Logger.Error("Connection error", "error", err)
} else {
// never happens
s.Logger.Error("Connection was closed.")
}
// Close the connection
if err := s.rmConn(connID); err != nil {
s.Logger.Error("Error in closing connection", "error", err)
}
}
// Read requests from conn and deal with them
func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) {
var count int
var bufReader = bufio.NewReader(conn)
for {
var req = &types.Request{}
err := types.ReadMessage(bufReader, req)
if err != nil {
if err == io.EOF {
closeConn <- err
} else {
closeConn <- fmt.Errorf("Error reading message: %v", err.Error())
}
return
}
s.appMtx.Lock()
count++
s.handleRequest(req, responses)
s.appMtx.Unlock()
}
}
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
switch r := req.Value.(type) {
case *types.Request_Echo:
responses <- types.ToResponseEcho(r.Echo.Message)
case *types.Request_Flush:
responses <- types.ToResponseFlush()
case *types.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_SetOption:
res := s.app.SetOption(*r.SetOption)
responses <- types.ToResponseSetOption(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(r.DeliverTx.Tx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(r.CheckTx.Tx)
responses <- types.ToResponseCheckTx(res)
case *types.Request_Commit:
res := s.app.Commit()
responses <- types.ToResponseCommit(res)
case *types.Request_Query:
res := s.app.Query(*r.Query)
responses <- types.ToResponseQuery(res)
case *types.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
default:
responses <- types.ToResponseException("Unknown request")
}
}
// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(closeConn chan error, conn net.Conn, responses <-chan *types.Response) {
var count int
var bufWriter = bufio.NewWriter(conn)
for {
var res = <-responses
err := types.WriteMessage(res, bufWriter)
if err != nil {
closeConn <- fmt.Errorf("Error writing message: %v", err.Error())
return
}
if _, ok := res.Value.(*types.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("Error flushing write buffer: %v", err.Error())
return
}
}
count++
}
}

View File

@@ -1 +0,0 @@
package benchmarks

View File

@@ -1,55 +0,0 @@
package main
import (
"bufio"
"fmt"
"log"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func main() {
conn, err := cmn.Connect("unix://test.sock")
if err != nil {
log.Fatal(err.Error())
}
// Read a bunch of responses
go func() {
counter := 0
for {
var res = &types.Response{}
err := types.ReadMessage(conn, res)
if err != nil {
log.Fatal(err.Error())
}
counter++
if counter%1000 == 0 {
fmt.Println("Read", counter)
}
}
}()
// Write a bunch of requests
counter := 0
for i := 0; ; i++ {
var bufWriter = bufio.NewWriter(conn)
var req = types.ToRequestEcho("foobar")
err := types.WriteMessage(req, bufWriter)
if err != nil {
log.Fatal(err.Error())
}
err = bufWriter.Flush()
if err != nil {
log.Fatal(err.Error())
}
counter++
if counter%1000 == 0 {
fmt.Println("Write", counter)
}
}
}

View File

@@ -1,69 +0,0 @@
package main
import (
"bufio"
"fmt"
"log"
"net"
"reflect"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func main() {
conn, err := cmn.Connect("unix://test.sock")
if err != nil {
log.Fatal(err.Error())
}
// Make a bunch of requests
counter := 0
for i := 0; ; i++ {
req := types.ToRequestEcho("foobar")
_, err := makeRequest(conn, req)
if err != nil {
log.Fatal(err.Error())
}
counter++
if counter%1000 == 0 {
fmt.Println(counter)
}
}
}
func makeRequest(conn net.Conn, req *types.Request) (*types.Response, error) {
var bufWriter = bufio.NewWriter(conn)
// Write desired request
err := types.WriteMessage(req, bufWriter)
if err != nil {
return nil, err
}
err = types.WriteMessage(types.ToRequestFlush(), bufWriter)
if err != nil {
return nil, err
}
err = bufWriter.Flush()
if err != nil {
return nil, err
}
// Read desired response
var res = &types.Response{}
err = types.ReadMessage(conn, res)
if err != nil {
return nil, err
}
var resFlush = &types.Response{}
err = types.ReadMessage(conn, resFlush)
if err != nil {
return nil, err
}
if _, ok := resFlush.Value.(*types.Response_Flush); !ok {
return nil, fmt.Errorf("Expected flush response but got something else: %v", reflect.TypeOf(resFlush))
}
return res, nil
}

View File

@@ -1,27 +0,0 @@
package tests
import (
"testing"
"github.com/stretchr/testify/assert"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
)
func TestClientServerNoAddrPrefix(t *testing.T) {
addr := "localhost:26658"
transport := "socket"
app := kvstore.NewKVStoreApplication()
server, err := abciserver.NewServer(addr, transport, app)
assert.NoError(t, err, "expected no error on NewServer")
err = server.Start()
assert.NoError(t, err, "expected no error on server.Start")
client, err := abciclient.NewClient(addr, transport, true)
assert.NoError(t, err, "expected no error on NewClient")
err = client.Start()
assert.NoError(t, err, "expected no error on client.Start")
}

View File

@@ -1,96 +0,0 @@
package testsuite
import (
"bytes"
"errors"
"fmt"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := cmn.RandBytes(33)
power := cmn.RandInt()
vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
}
_, err := client.InitChainSync(types.RequestInitChain{
Validators: vals,
})
if err != nil {
fmt.Printf("Failed test: InitChain - %v\n", err)
return err
}
fmt.Println("Passed test: InitChain")
return nil
}
func SetOption(client abcicli.Client, key, value string) error {
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
fmt.Println("Failed test: SetOption")
fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
return err
}
fmt.Println("Passed test: SetOption")
return nil
}
func Commit(client abcicli.Client, hashExp []byte) error {
res, err := client.CommitSync()
data := res.Data
if err != nil {
fmt.Println("Failed test: Commit")
fmt.Printf("error while committing: %v\n", err)
return err
}
if !bytes.Equal(data, hashExp) {
fmt.Println("Failed test: Commit")
fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
return errors.New("CommitTx failed")
}
fmt.Println("Passed test: Commit")
return nil
}
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(txBytes)
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("DeliverTx error")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: DeliverTx")
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("DeliverTx error")
}
fmt.Println("Passed test: DeliverTx")
return nil
}
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(txBytes)
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")
fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n",
code, codeExp, log)
return errors.New("CheckTx")
}
if !bytes.Equal(data, dataExp) {
fmt.Println("Failed test: CheckTx")
fmt.Printf("CheckTx response data was unexpected. Got %X expected %X\n",
data, dataExp)
return errors.New("CheckTx")
}
fmt.Println("Passed test: CheckTx")
return nil
}

View File

@@ -1,78 +0,0 @@
package main
import (
"bytes"
"fmt"
"os"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
func startClient(abciType string) abcicli.Client {
// Start client
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
if err != nil {
panic(err.Error())
}
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
client.SetLogger(logger.With("module", "abcicli"))
if err := client.Start(); err != nil {
panicf("connecting to abci_app: %v", err.Error())
}
return client
}
func setOption(client abcicli.Client, key, value string) {
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
panicf("setting %v=%v: \nerr: %v", key, value, err)
}
}
func commit(client abcicli.Client, hashExp []byte) {
res, err := client.CommitSync()
if err != nil {
panicf("client error: %v", err)
}
if !bytes.Equal(res.Data, hashExp) {
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
}
}
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.DeliverTxSync(txBytes)
if err != nil {
panicf("client error: %v", err)
}
if res.Code != codeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
}
}
/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.CheckTxSync(txBytes)
if err != nil {
panicf("client error: %v", err)
}
if res.IsErr() {
panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
}
if res.Code != codeExp {
panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("CheckTx response data was unexpected. Got %X expected %X",
res.Data, dataExp)
}
}*/
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}

View File

@@ -1,84 +0,0 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
var abciType string
func init() {
abciType = os.Getenv("ABCI")
if abciType == "" {
abciType = "socket"
}
}
func main() {
testCounter()
}
const (
maxABCIConnectTries = 10
)
func ensureABCIIsUp(typ string, n int) error {
var err error
cmdString := "abci-cli echo hello"
if typ == "grpc" {
cmdString = "abci-cli --abci grpc echo hello"
}
for i := 0; i < n; i++ {
cmd := exec.Command("bash", "-c", cmdString) // nolint: gas
_, err = cmd.CombinedOutput()
if err == nil {
break
}
<-time.After(500 * time.Millisecond)
}
return err
}
func testCounter() {
abciApp := os.Getenv("ABCI_APP")
if abciApp == "" {
panic("No ABCI_APP specified")
}
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp)) // nolint: gas
cmd.Stdout = os.Stdout
if err := cmd.Start(); err != nil {
log.Fatalf("starting %q err: %v", abciApp, err)
}
defer cmd.Wait()
defer cmd.Process.Kill()
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
log.Fatalf("echo failed: %v", err)
}
client := startClient(abciType)
defer client.Stop()
setOption(client, "serial", "on")
commit(client, nil)
deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}

View File

@@ -1,27 +0,0 @@
#! /bin/bash
set -e
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
# Get the directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run ./*.go
echo "----------------------"
echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go
echo "----------------------"
# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter

View File

@@ -1,10 +0,0 @@
echo hello
info
commit
deliver_tx "abc"
info
commit
query "abc"
deliver_tx "def=xyz"
commit
query "def"

View File

@@ -1,51 +0,0 @@
> echo hello
-> code: OK
-> data: hello
-> data.hex: 0x68656C6C6F
> info
-> code: OK
-> data: {"size":0}
-> data.hex: 0x7B2273697A65223A307D
> commit
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
-> code: OK
> info
-> code: OK
-> data: {"size":1}
-> data.hex: 0x7B2273697A65223A317D
> commit
-> code: OK
-> data.hex: 0x0200000000000000
> query "abc"
-> code: OK
-> log: exists
-> height: 0
-> key: abc
-> key.hex: 616263
-> value: abc
-> value.hex: 616263
> deliver_tx "def=xyz"
-> code: OK
> commit
-> code: OK
-> data.hex: 0x0400000000000000
> query "def"
-> code: OK
-> log: exists
-> height: 0
-> key: def
-> key.hex: 646566
-> value: xyz
-> value.hex: 78797A

View File

@@ -1,8 +0,0 @@
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00
check_tx 0x00
deliver_tx 0x01
deliver_tx 0x04
info

View File

@@ -1,29 +0,0 @@
> set_option serial on
-> code: OK
-> log: OK (SetOption doesn't return anything.)
> check_tx 0x00
-> code: OK
> check_tx 0xff
-> code: OK
> deliver_tx 0x00
-> code: OK
> check_tx 0x00
-> code: 2
-> log: Invalid nonce. Expected >= 1, got 0
> deliver_tx 0x01
-> code: OK
> deliver_tx 0x04
-> code: 2
-> log: Invalid nonce. Expected 2, got 4
> info
-> code: OK
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D

View File

@@ -1,42 +0,0 @@
#! /bin/bash
set -e
# Get the root directory.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"
# Change into that dir because we expect that.
cd "$DIR" || exit
function testExample() {
N=$1
INPUT=$2
APP="$3 $4"
echo "Example $N: $APP"
$APP &> /dev/null &
sleep 2
abci-cli --log_level=error --verbose batch < "$INPUT" > "${INPUT}.out.new"
killall "$3"
pre=$(shasum < "${INPUT}.out")
post=$(shasum < "${INPUT}.out.new")
if [[ "$pre" != "$post" ]]; then
echo "You broke the tutorial"
echo "Got:"
cat "${INPUT}.out.new"
echo "Expected:"
cat "${INPUT}.out"
exit 1
fi
rm "${INPUT}".out.new
}
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter
echo ""
echo "PASS"

View File

@@ -1 +0,0 @@
package tests

View File

@@ -1,138 +0,0 @@
package types // nolint: goimports
import (
context "golang.org/x/net/context"
)
// Application is an interface that enables any finite, deterministic state machine
// to be driven by a blockchain-based replication engine via the ABCI.
// All methods take a RequestXxx argument and return a ResponseXxx argument,
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
// Info/Query Connection
Info(RequestInfo) ResponseInfo // Return application info
SetOption(RequestSetOption) ResponseSetOption // Set application option
Query(RequestQuery) ResponseQuery // Query for state
// Mempool Connection
CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
}
//-------------------------------------------------------
// BaseApplication is a base form of Application
var _ Application = (*BaseApplication)(nil)
type BaseApplication struct {
}
func NewBaseApplication() *BaseApplication {
return &BaseApplication{}
}
func (BaseApplication) Info(req RequestInfo) ResponseInfo {
return ResponseInfo{}
}
func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
return ResponseSetOption{}
}
func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}
func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}
func (BaseApplication) Commit() ResponseCommit {
return ResponseCommit{}
}
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
return ResponseQuery{Code: CodeTypeOK}
}
func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
return ResponseInitChain{}
}
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
return ResponseBeginBlock{}
}
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
return ResponseEndBlock{}
}
//-------------------------------------------------------
// GRPCApplication is a GRPC wrapper for Application
type GRPCApplication struct {
app Application
}
func NewGRPCApplication(app Application) *GRPCApplication {
return &GRPCApplication{app}
}
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
return &ResponseEcho{Message: req.Message}, nil
}
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
return &ResponseFlush{}, nil
}
func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
res := app.app.Info(*req)
return &res, nil
}
func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
res := app.app.SetOption(*req)
return &res, nil
}
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(req.Tx)
return &res, nil
}
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(req.Tx)
return &res, nil
}
func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
res := app.app.Query(*req)
return &res, nil
}
func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
res := app.app.Commit()
return &res, nil
}
func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
res := app.app.InitChain(*req)
return &res, nil
}
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
res := app.app.BeginBlock(*req)
return &res, nil
}
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
res := app.app.EndBlock(*req)
return &res, nil
}
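As a hedged illustration (not from this changeset), an application would typically embed BaseApplication so that it only overrides the handlers it needs; the package, type, and field names below are hypothetical.

package myapp // hypothetical package

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// TxCounterApp embeds BaseApplication, which supplies no-op defaults for
// every Application method, and overrides only DeliverTx and Info.
type TxCounterApp struct {
	types.BaseApplication
	txs int64
}

func (app *TxCounterApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
	app.txs++
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}

func (app *TxCounterApp) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("txs=%d", app.txs)}
}

// compile-time check that the embedding still satisfies the interface
var _ types.Application = (*TxCounterApp)(nil)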

View File

@@ -1,210 +0,0 @@
package types
import (
"bufio"
"encoding/binary"
"io"
"github.com/gogo/protobuf/proto"
)
const (
maxMsgSize = 104857600 // 100MB
)
// WriteMessage writes a varint length-delimited protobuf message.
func WriteMessage(msg proto.Message, w io.Writer) error {
bz, err := proto.Marshal(msg)
if err != nil {
return err
}
return encodeByteSlice(w, bz)
}
// ReadMessage reads a varint length-delimited protobuf message.
func ReadMessage(r io.Reader, msg proto.Message) error {
return readProtoMsg(r, msg, maxMsgSize)
}
func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
reader, ok := r.(*bufio.Reader)
if !ok {
reader = bufio.NewReader(r)
}
length64, err := binary.ReadVarint(reader)
if err != nil {
return err
}
length := int(length64)
if length < 0 || length > maxSize {
return io.ErrShortBuffer
}
buf := make([]byte, length)
if _, err := io.ReadFull(reader, buf); err != nil {
return err
}
return proto.Unmarshal(buf, msg)
}
//-----------------------------------------------------------------------
// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keep
// go-wire as a dep
func encodeByteSlice(w io.Writer, bz []byte) (err error) {
err = encodeVarint(w, int64(len(bz)))
if err != nil {
return
}
_, err = w.Write(bz)
return
}
func encodeVarint(w io.Writer, i int64) (err error) {
var buf [10]byte
n := binary.PutVarint(buf[:], i)
_, err = w.Write(buf[0:n])
return
}
//----------------------------------------
func ToRequestEcho(message string) *Request {
return &Request{
Value: &Request_Echo{&RequestEcho{Message: message}},
}
}
func ToRequestFlush() *Request {
return &Request{
Value: &Request_Flush{&RequestFlush{}},
}
}
func ToRequestInfo(req RequestInfo) *Request {
return &Request{
Value: &Request_Info{&req},
}
}
func ToRequestSetOption(req RequestSetOption) *Request {
return &Request{
Value: &Request_SetOption{&req},
}
}
func ToRequestDeliverTx(tx []byte) *Request {
return &Request{
Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
}
}
func ToRequestCheckTx(tx []byte) *Request {
return &Request{
Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
}
}
func ToRequestCommit() *Request {
return &Request{
Value: &Request_Commit{&RequestCommit{}},
}
}
func ToRequestQuery(req RequestQuery) *Request {
return &Request{
Value: &Request_Query{&req},
}
}
func ToRequestInitChain(req RequestInitChain) *Request {
return &Request{
Value: &Request_InitChain{&req},
}
}
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
return &Request{
Value: &Request_BeginBlock{&req},
}
}
func ToRequestEndBlock(req RequestEndBlock) *Request {
return &Request{
Value: &Request_EndBlock{&req},
}
}
//----------------------------------------
func ToResponseException(errStr string) *Response {
return &Response{
Value: &Response_Exception{&ResponseException{Error: errStr}},
}
}
func ToResponseEcho(message string) *Response {
return &Response{
Value: &Response_Echo{&ResponseEcho{Message: message}},
}
}
func ToResponseFlush() *Response {
return &Response{
Value: &Response_Flush{&ResponseFlush{}},
}
}
func ToResponseInfo(res ResponseInfo) *Response {
return &Response{
Value: &Response_Info{&res},
}
}
func ToResponseSetOption(res ResponseSetOption) *Response {
return &Response{
Value: &Response_SetOption{&res},
}
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
return &Response{
Value: &Response_DeliverTx{&res},
}
}
func ToResponseCheckTx(res ResponseCheckTx) *Response {
return &Response{
Value: &Response_CheckTx{&res},
}
}
func ToResponseCommit(res ResponseCommit) *Response {
return &Response{
Value: &Response_Commit{&res},
}
}
func ToResponseQuery(res ResponseQuery) *Response {
return &Response{
Value: &Response_Query{&res},
}
}
func ToResponseInitChain(res ResponseInitChain) *Response {
return &Response{
Value: &Response_InitChain{&res},
}
}
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
return &Response{
Value: &Response_BeginBlock{&res},
}
}
func ToResponseEndBlock(res ResponseEndBlock) *Response {
return &Response{
Value: &Response_EndBlock{&res},
}
}
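A small self-contained sketch, assuming the abci/types package shown above: WriteMessage and ReadMessage round-trip a varint length-prefixed Request through an in-memory buffer.

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	buf := new(bytes.Buffer)
	// write a length-prefixed RequestEcho
	if err := types.WriteMessage(types.ToRequestEcho("hello"), buf); err != nil {
		panic(err)
	}
	// read it back into a fresh Request and inspect the oneof
	req := new(types.Request)
	if err := types.ReadMessage(buf, req); err != nil {
		panic(err)
	}
	fmt.Println(req.GetEcho().Message) // prints "hello"
}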

View File

@@ -1,103 +0,0 @@
package types
import (
"bytes"
"encoding/json"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ResponseDeliverTx{})
assert.Nil(t, err)
// Do not include empty fields.
assert.False(t, strings.Contains(string(b), "code"))
r1 := ResponseCheckTx{
Code: 1,
Data: []byte("hello"),
GasWanted: 43,
Tags: []cmn.KVPair{
{Key: []byte("pho"), Value: []byte("bo")},
},
}
b, err = json.Marshal(&r1)
assert.Nil(t, err)
var r2 ResponseCheckTx
err = json.Unmarshal(b, &r2)
assert.Nil(t, err)
assert.Equal(t, r1, r2)
}
func TestWriteReadMessageSimple(t *testing.T) {
cases := []proto.Message{
&RequestEcho{
Message: "Hello",
},
}
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.Nil(t, err)
msg := new(RequestEcho)
err = ReadMessage(buf, msg)
assert.Nil(t, err)
assert.Equal(t, c, msg)
}
}
func TestWriteReadMessage(t *testing.T) {
cases := []proto.Message{
&Header{
NumTxs: 4,
},
// TODO: add the rest
}
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.Nil(t, err)
msg := new(Header)
err = ReadMessage(buf, msg)
assert.Nil(t, err)
assert.Equal(t, c, msg)
}
}
func TestWriteReadMessage2(t *testing.T) {
phrase := "hello-world"
cases := []proto.Message{
&ResponseCheckTx{
Data: []byte(phrase),
Log: phrase,
GasWanted: 10,
Tags: []cmn.KVPair{
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
},
},
// TODO: add the rest
}
for _, c := range cases {
buf := new(bytes.Buffer)
err := WriteMessage(c, buf)
assert.Nil(t, err)
msg := new(ResponseCheckTx)
err = ReadMessage(buf, msg)
assert.Nil(t, err)
assert.Equal(t, c, msg)
}
}

View File

@@ -1,55 +0,0 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
)
// This script replaces most `[]byte` with `data.Bytes` in a `.pb.go` file.
// It was written before we realized we could use `gogo/protobuf` to achieve
// this more natively. So it's here for safe keeping in case we ever need to
// abandon `gogo/protobuf`.
func main() {
bytePattern := regexp.MustCompile("[[][]]byte")
const oldPath = "types/types.pb.go"
const tmpPath = "types/types.pb.new"
content, err := ioutil.ReadFile(oldPath)
if err != nil {
panic("cannot read " + oldPath)
os.Exit(1)
}
lines := bytes.Split(content, []byte("\n"))
outFile, _ := os.Create(tmpPath)
wroteImport := false
for _, line_bytes := range lines {
line := string(line_bytes)
gotPackageLine := strings.HasPrefix(line, "package ")
writeImportTime := strings.HasPrefix(line, "import ")
containsDescriptor := strings.Contains(line, "Descriptor")
containsByteArray := strings.Contains(line, "[]byte")
if containsByteArray && !containsDescriptor {
line = string(bytePattern.ReplaceAll([]byte(line), []byte("data.Bytes")))
}
if writeImportTime && !wroteImport {
wroteImport = true
fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n")
}
if gotPackageLine {
fmt.Fprintf(outFile, "%s\n", "//nolint: gas")
}
fmt.Fprintf(outFile, "%s\n", line)
}
outFile.Close()
os.Remove(oldPath)
os.Rename(tmpPath, oldPath)
exec.Command("goimports", "-w", oldPath)
}

View File

@@ -1,16 +0,0 @@
package types
const (
PubKeyEd25519 = "ed25519"
)
func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate {
return ValidatorUpdate{
// Address:
PubKey: PubKey{
Type: PubKeyEd25519,
Data: pubkey,
},
Power: power,
}
}

View File

@@ -1,121 +0,0 @@
package types
import (
"bytes"
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
)
const (
CodeTypeOK uint32 = 0
)
// IsOK returns true if Code is OK.
func (r ResponseCheckTx) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ResponseCheckTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseDeliverTx) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ResponseDeliverTx) IsErr() bool {
return r.Code != CodeTypeOK
}
// IsOK returns true if Code is OK.
func (r ResponseQuery) IsOK() bool {
return r.Code == CodeTypeOK
}
// IsErr returns true if Code is something other than OK.
func (r ResponseQuery) IsErr() bool {
return r.Code != CodeTypeOK
}
//---------------------------------------------------------------------------
// override JSON marshalling so we don't emit defaults (i.e. empty fields are omitted)
// note we need Unmarshal functions too because protobuf had the bright idea
// to marshal int64->string. cool. cool, cool, cool: https://developers.google.com/protocol-buffers/docs/proto3#json
var (
jsonpbMarshaller = jsonpb.Marshaler{
EnumsAsInts: true,
EmitDefaults: false,
}
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)
func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseQuery) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseQuery) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
func (r *ResponseCommit) MarshalJSON() ([]byte, error) {
s, err := jsonpbMarshaller.MarshalToString(r)
return []byte(s), err
}
func (r *ResponseCommit) UnmarshalJSON(b []byte) error {
reader := bytes.NewBuffer(b)
return jsonpbUnmarshaller.Unmarshal(reader, r)
}
// Some compile time assertions to ensure we don't
// have accidental runtime surprises later on.
// jsonRoundTripper ensures that asserted
// types implement both MarshalJSON and UnmarshalJSON
type jsonRoundTripper interface {
json.Marshaler
json.Unmarshaler
}
var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
var _ jsonRoundTripper = (*ResponseSetOption)(nil)
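Illustrative only, not part of this diff: with the jsonpb overrides above, zero-valued fields are dropped and int64 gas fields come out as JSON strings; the exact field names follow the generated protobuf JSON names.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	res := &types.ResponseCheckTx{Code: 1, GasWanted: 100}
	bz, err := json.Marshal(res) // dispatches to the MarshalJSON override above
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // prints something like {"code":1,"gasWanted":"100"}
}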

File diff suppressed because it is too large Load Diff

View File

@@ -1,333 +0,0 @@
syntax = "proto3";
package types;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/tendermint/tendermint/libs/common/types.proto";
import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";
// This file is copied from http://github.com/tendermint/abci
// NOTE: When using custom types, mind the warnings.
// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.goproto_registration) = true;
// Generate tests
option (gogoproto.populate_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.testgen_all) = true;
//----------------------------------------
// Request types
message Request {
oneof value {
RequestEcho echo = 2;
RequestFlush flush = 3;
RequestInfo info = 4;
RequestSetOption set_option = 5;
RequestInitChain init_chain = 6;
RequestQuery query = 7;
RequestBeginBlock begin_block = 8;
RequestCheckTx check_tx = 9;
RequestDeliverTx deliver_tx = 19;
RequestEndBlock end_block = 11;
RequestCommit commit = 12;
}
}
message RequestEcho {
string message = 1;
}
message RequestFlush {
}
message RequestInfo {
string version = 1;
uint64 block_version = 2;
uint64 p2p_version = 3;
}
// nondeterministic
message RequestSetOption {
string key = 1;
string value = 2;
}
message RequestInitChain {
google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
string chain_id = 2;
ConsensusParams consensus_params = 3;
repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false];
bytes app_state_bytes = 5;
}
message RequestQuery {
bytes data = 1;
string path = 2;
int64 height = 3;
bool prove = 4;
}
message RequestBeginBlock {
bytes hash = 1;
Header header = 2 [(gogoproto.nullable)=false];
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable)=false];
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
}
message RequestCheckTx {
bytes tx = 1;
}
message RequestDeliverTx {
bytes tx = 1;
}
message RequestEndBlock {
int64 height = 1;
}
message RequestCommit {
}
//----------------------------------------
// Response types
message Response {
oneof value {
ResponseException exception = 1;
ResponseEcho echo = 2;
ResponseFlush flush = 3;
ResponseInfo info = 4;
ResponseSetOption set_option = 5;
ResponseInitChain init_chain = 6;
ResponseQuery query = 7;
ResponseBeginBlock begin_block = 8;
ResponseCheckTx check_tx = 9;
ResponseDeliverTx deliver_tx = 10;
ResponseEndBlock end_block = 11;
ResponseCommit commit = 12;
}
}
// nondeterministic
message ResponseException {
string error = 1;
}
message ResponseEcho {
string message = 1;
}
message ResponseFlush {
}
message ResponseInfo {
string data = 1;
string version = 2;
uint64 app_version = 3;
int64 last_block_height = 4;
bytes last_block_app_hash = 5;
}
// nondeterministic
message ResponseSetOption {
uint32 code = 1;
// bytes data = 2;
string log = 3;
string info = 4;
}
message ResponseInitChain {
ConsensusParams consensus_params = 1;
repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
}
message ResponseQuery {
uint32 code = 1;
// bytes data = 2; // use "value" instead.
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 index = 5;
bytes key = 6;
bytes value = 7;
merkle.Proof proof = 8;
int64 height = 9;
string codespace = 10;
}
message ResponseBeginBlock {
repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}
message ResponseCheckTx {
uint32 code = 1;
bytes data = 2;
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
string codespace = 8;
}
message ResponseDeliverTx {
uint32 code = 1;
bytes data = 2;
string log = 3; // nondeterministic
string info = 4; // nondeterministic
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
string codespace = 8;
}
message ResponseEndBlock {
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
ConsensusParams consensus_param_updates = 2;
repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}
message ResponseCommit {
// reserve 1
bytes data = 2;
}
//----------------------------------------
// Misc.
// ConsensusParams contains all consensus-relevant parameters
// that can be adjusted by the abci app
message ConsensusParams {
BlockSizeParams block_size = 1;
EvidenceParams evidence = 2;
ValidatorParams validator = 3;
}
// BlockSize contains limits on the block size.
message BlockSizeParams {
// Note: must be greater than 0
int64 max_bytes = 1;
// Note: must be greater than or equal to -1
int64 max_gas = 2;
}
// EvidenceParams contains limits on the evidence.
message EvidenceParams {
// Note: must be greater than 0
int64 max_age = 1;
}
// ValidatorParams contains limits on validators.
message ValidatorParams {
repeated string pub_key_types = 1;
}
message LastCommitInfo {
int32 round = 1;
repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
}
//----------------------------------------
// Blockchain Types
message Header {
// basic block info
Version version = 1 [(gogoproto.nullable)=false];
string chain_id = 2 [(gogoproto.customname)="ChainID"];
int64 height = 3;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 num_txs = 5;
int64 total_txs = 6;
// prev block info
BlockID last_block_id = 7 [(gogoproto.nullable)=false];
// hashes of block data
bytes last_commit_hash = 8; // commit from validators from the last block
bytes data_hash = 9; // transactions
// hashes from the app output from the prev block
bytes validators_hash = 10; // validators for the current block
bytes next_validators_hash = 11; // validators for the next block
bytes consensus_hash = 12; // consensus params for current block
bytes app_hash = 13; // state after txs from the previous block
bytes last_results_hash = 14;// root hash of all results from the txs from the previous block
// consensus info
bytes evidence_hash = 15; // evidence included in the block
bytes proposer_address = 16; // original proposer of the block
}
message Version {
uint64 Block = 1;
uint64 App = 2;
}
message BlockID {
bytes hash = 1;
PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
}
message PartSetHeader {
int32 total = 1;
bytes hash = 2;
}
// Validator
message Validator {
bytes address = 1;
//PubKey pub_key = 2 [(gogoproto.nullable)=false];
int64 power = 3;
}
// ValidatorUpdate
message ValidatorUpdate {
PubKey pub_key = 1 [(gogoproto.nullable)=false];
int64 power = 2;
}
// VoteInfo
message VoteInfo {
Validator validator = 1 [(gogoproto.nullable)=false];
bool signed_last_block = 2;
}
message PubKey {
string type = 1;
bytes data = 2;
}
message Evidence {
string type = 1;
Validator validator = 2 [(gogoproto.nullable)=false];
int64 height = 3;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 total_voting_power = 5;
}
//----------------------------------------
// Service Definition
service ABCIApplication {
rpc Echo(RequestEcho) returns (ResponseEcho) ;
rpc Flush(RequestFlush) returns (ResponseFlush);
rpc Info(RequestInfo) returns (ResponseInfo);
rpc SetOption(RequestSetOption) returns (ResponseSetOption);
rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx);
rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx);
rpc Query(RequestQuery) returns (ResponseQuery);
rpc Commit(RequestCommit) returns (ResponseCommit);
rpc InitChain(RequestInitChain) returns (ResponseInitChain);
rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock);
rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock);
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,34 +0,0 @@
package types
import (
"bytes"
"sort"
)
//------------------------------------------------------------------------------
// ValidatorUpdates is a list of validators that implements the Sort interface
type ValidatorUpdates []ValidatorUpdate
var _ sort.Interface = (ValidatorUpdates)(nil)
// Len, Less and Swap implement sort.Interface for ValidatorUpdates,
// so the sort package can order a set of validator updates.
// See Issue https://github.com/tendermint/abci/issues/212
func (v ValidatorUpdates) Len() int {
return len(v)
}
// XXX: doesn't distinguish same validator with different power
func (v ValidatorUpdates) Less(i, j int) bool {
return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0
}
func (v ValidatorUpdates) Swap(i, j int) {
v1 := v[i]
v[i] = v[j]
v[j] = v1
}
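A brief sketch, assuming the abci/types package: because ValidatorUpdates implements sort.Interface, the standard sort package orders updates by public key bytes.

package main

import (
	"fmt"
	"sort"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	updates := types.ValidatorUpdates{
		types.Ed25519ValidatorUpdate([]byte{0x02}, 10),
		types.Ed25519ValidatorUpdate([]byte{0x01}, 5),
	}
	sort.Sort(updates) // ordered by PubKey.Data, per Less above
	fmt.Println(updates[0].Power, updates[1].Power) // 5 10
}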

View File

@@ -1,9 +0,0 @@
package version
import (
"github.com/tendermint/tendermint/version"
)
// TODO: eliminate this after some version refactor
const Version = version.ABCIVersion

View File

@@ -4,36 +4,28 @@ import (
"testing"
"time"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/p2p"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
return p2p.DefaultNodeInfo{
ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
ID_: id,
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: p2p.DefaultNodeInfoOther{
TxIndex: "on",
RPCAddress: "0.0.0.0:26657",
},
}
}
func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
status := &ctypes.ResultStatus{
NodeInfo: testNodeInfo(nodeKey.ID()),
NodeInfo: p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
},
SyncInfo: ctypes.SyncInfo{
LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123,
@@ -60,8 +52,15 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := testNodeInfo(nodeKey.ID())
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
b.StartTimer()
counter := 0
@@ -78,8 +77,15 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := testNodeInfo(nodeKey.ID())
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
b.StartTimer()
counter := 0
@@ -92,7 +98,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer()
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeID := string(nodeKey.ID())
someName := "SOMENAME"
someAddr := "SOMEADDR"

View File

@@ -0,0 +1,26 @@
DIST_DIRS := find * -type d -exec
VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go)
GOTOOLS = \
github.com/mitchellh/gox
tools:
go get $(GOTOOLS)
get_vendor_deps:
@hash glide 2>/dev/null || go get github.com/Masterminds/glide
glide install
build:
go build
install:
go install
test:
go test -race
clean:
rm -f ./experiments
rm -rf ./dist
.PHONY: tools get_vendor_deps build install test clean

View File

@@ -0,0 +1,12 @@
package: github.com/tendermint/tendermint/benchmarks/experiments
import:
- package: github.com/tendermint/tendermint
version: v0.16.0
subpackages:
- rpc/client
- rpc/lib/types
- types
- package: github.com/tendermint/tmlibs
version: v0.7.0
subpackages:
- log

View File

@@ -0,0 +1,126 @@
package main
import (
"encoding/binary"
"fmt"
"math/rand"
"os"
"sync"
"time"
"context"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/log"
)
var logger = log.NewNopLogger()
var finishedTasks = 0
var mutex = &sync.Mutex{}
func main() {
var endpoint = "tcp://0.0.0.0:46657"
var httpClient = getHTTPClient(endpoint)
var res, err = httpClient.Status()
if err != nil {
logger.Info("something wrong happens", err)
}
logger.Info("received status", res)
go monitorTask(endpoint)
txCount := 10
var clientNumber = 10
for i := 0; i < clientNumber; i++ {
go clientTask(i, txCount, endpoint)
}
// wait (under the mutex) until every client task plus the monitor task has finished
for {
mutex.Lock()
done := finishedTasks
mutex.Unlock()
if done >= clientNumber+1 {
break
}
time.Sleep(100 * time.Millisecond)
}
fmt.Printf("Done: %d\n", finishedTasks)
}
func clientTask(id, txCount int, endpoint string) {
var httpClient = getHTTPClient(endpoint)
for i := 0; i < txCount; i++ {
var _, err = httpClient.BroadcastTxSync(generateTx(id, rand.Int()))
if err != nil {
fmt.Printf("Something wrong happened: %s\n", err)
}
}
fmt.Printf("Finished client task: %d\n", id)
mutex.Lock()
finishedTasks++
mutex.Unlock()
}
func getHTTPClient(rpcAddr string) *client.HTTP {
return client.NewHTTP(rpcAddr, "/websocket")
}
func generateTx(i, valI int) []byte {
// a tx encodes the validator index, the tx number, and some random junk
tx := make([]byte, 250)
binary.PutUvarint(tx[:32], uint64(valI))
binary.PutUvarint(tx[32:64], uint64(i))
if _, err := rand.Read(tx[65:]); err != nil {
fmt.Println("err reading from crypto/rand", err)
os.Exit(1)
}
return tx
}
func monitorTask(endpoint string) {
fmt.Println("Monitor task started...")
var duration = 5 * time.Second
const subscriber = "monitor"
ctx, cancel := context.WithTimeout(context.Background(), duration)
defer cancel()
evts := make(chan interface{})
var httpClient = getHTTPClient(endpoint)
httpClient.Start()
evtTyp := types.EventNewBlockHeader
// register for the next event of this type
query := types.QueryForEvent(evtTyp)
err := httpClient.Subscribe(ctx, subscriber, query, evts)
if err != nil {
fmt.Println("error when subscribing", err)
}
// make sure to unregister after the test is over
defer httpClient.UnsubscribeAll(ctx, subscriber)
totalNumOfCommittedTxs := int64(0)
for {
fmt.Println("Starting main loop", err)
select {
case evt := <-evts:
event := evt.(types.TMEventData)
header, ok := event.Unwrap().(types.EventDataNewBlockHeader)
if ok {
fmt.Println("received header\n", header.Header.StringIndented(""))
} else {
fmt.Println("not able to unwrap header")
}
// Do some metric computation with header
totalNumOfCommittedTxs += header.Header.NumTxs
case <-ctx.Done():
fmt.Printf("Finished monitor task. Received %d transactions \n", totalNumOfCommittedTxs)
mutex.Lock()
finishedTasks++
mutex.Unlock()
return
}
}
}

View File

@@ -3,7 +3,7 @@ package benchmarks
import (
"testing"
cmn "github.com/tendermint/tendermint/libs/common"
cmn "github.com/tendermint/tmlibs/common"
)
func BenchmarkSomething(b *testing.B) {

View File

@@ -4,7 +4,7 @@ import (
"os"
"testing"
cmn "github.com/tendermint/tendermint/libs/common"
cmn "github.com/tendermint/tmlibs/common"
)
func BenchmarkFileWrite(b *testing.B) {

View File

@@ -6,12 +6,12 @@ import (
"fmt"
"time"
cmn "github.com/tendermint/tendermint/libs/common"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
cmn "github.com/tendermint/tmlibs/common"
)
func main() {
wsc := rpcclient.NewWSClient("127.0.0.1:26657", "/websocket")
wsc := rpcclient.NewWSClient("127.0.0.1:46657", "/websocket")
err := wsc.Start()
if err != nil {
cmn.Exit(err.Error())

View File

@@ -5,12 +5,11 @@ import (
"fmt"
"math"
"sync"
"sync/atomic"
"time"
cmn "github.com/tendermint/tendermint/libs/common"
flow "github.com/tendermint/tendermint/libs/flowrate"
"github.com/tendermint/tendermint/libs/log"
cmn "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
@@ -29,10 +28,10 @@ eg, L = latency = 0.1s
*/
const (
requestIntervalMS = 2
maxTotalRequesters = 600
requestIntervalMS = 100
maxTotalRequesters = 1000
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 20
maxPendingRequestsPerPeer = 50
// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at at least that rate, we
@@ -67,13 +66,11 @@ type BlockPool struct {
// block requests
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
numPending int32 // number of requests pending assignment or block response
// peers
peers map[p2p.ID]*bpPeer
maxPeerHeight int64
// atomic
numPending int32 // number of requests pending assignment or block response
requestsCh chan<- BlockRequest
errorsCh chan<- peerError
}
@@ -154,7 +151,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
return pool.height, pool.numPending, len(pool.requesters)
}
// TODO: relax conditions, prevent abuse.
@@ -168,12 +165,9 @@ func (pool *BlockPool) IsCaughtUp() bool {
return false
}
// Some conditions to determine if we're caught up.
// Ensures we've either received a block or waited some amount of time,
// and that we're synced to the highest known height. Note we use maxPeerHeight - 1
// because to sync block H requires block H+1 to verify the LastCommit.
receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
// some conditions to determine if we're caught up
receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second)
ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
return isCaughtUp
}
@@ -222,12 +216,14 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != p2p.ID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
if request.block == nil {
panic("Expected block to be non-nil")
}
return peerID
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(request.peerID)
return request.peerID
}
// TODO: ensure that blocks come in order for each peer.
@@ -249,14 +245,13 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
}
if requester.setBlock(block, peerID) {
atomic.AddInt32(&pool.numPending, -1)
pool.numPending--
peer := pool.peers[peerID]
if peer != nil {
peer.decrPending(blockSize)
}
} else {
pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height)
pool.sendError(errors.New("invalid peer"), peerID)
// Bad peer?
}
}
@@ -296,7 +291,10 @@ func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
func (pool *BlockPool) removePeer(peerID p2p.ID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
if requester.getBlock() != nil {
pool.numPending++
}
go requester.redo() // pick another peer and ...
}
}
delete(pool.peers, peerID)
@@ -330,14 +328,11 @@ func (pool *BlockPool) makeNextRequester() {
defer pool.mtx.Unlock()
nextHeight := pool.height + pool.requestersLen()
if nextHeight > pool.maxPeerHeight {
return
}
request := newBPRequester(pool, nextHeight)
// request.SetLogger(pool.Logger.With("height", nextHeight))
pool.requesters[nextHeight] = request
atomic.AddInt32(&pool.numPending, 1)
pool.numPending++
err := request.Start()
if err != nil {
@@ -365,17 +360,17 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
// unused by tendermint; left for debugging purposes
func (pool *BlockPool) debug() string {
pool.mtx.Lock()
pool.mtx.Lock() // Lock
defer pool.mtx.Unlock()
str := ""
nextHeight := pool.height + pool.requestersLen()
for h := pool.height; h < nextHeight; h++ {
if pool.requesters[h] == nil {
str += fmt.Sprintf("H(%v):X ", h)
str += cmn.Fmt("H(%v):X ", h)
} else {
str += fmt.Sprintf("H(%v):", h)
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
str += cmn.Fmt("H(%v):", h)
str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil)
}
}
return str
@@ -460,7 +455,7 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
redoCh chan p2p.ID //redo may send multitime, add peerId to identify repeat
redoCh chan struct{}
mtx sync.Mutex
peerID p2p.ID
@@ -471,8 +466,8 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
bpr := &bpRequester{
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan p2p.ID, 1),
gotBlockCh: make(chan struct{}),
redoCh: make(chan struct{}),
peerID: "",
block: nil,
@@ -486,7 +481,7 @@ func (bpr *bpRequester) OnStart() error {
return nil
}
// Returns true if the peer matches and block doesn't already exist.
// Returns true if the peer matches
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
@@ -496,10 +491,7 @@ func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
bpr.block = block
bpr.mtx.Unlock()
select {
case bpr.gotBlockCh <- struct{}{}:
default:
}
bpr.gotBlockCh <- struct{}{}
return true
}
@@ -515,27 +507,17 @@ func (bpr *bpRequester) getPeerID() p2p.ID {
return bpr.peerID
}
// This is called from the requestRoutine, upon redo().
func (bpr *bpRequester) reset() {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
if bpr.block != nil {
atomic.AddInt32(&bpr.pool.numPending, 1)
}
bpr.peerID = ""
bpr.block = nil
bpr.mtx.Unlock()
}
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerId p2p.ID) {
select {
case bpr.redoCh <- peerId:
default:
}
// NOTE: blocking
func (bpr *bpRequester) redo() {
bpr.redoCh <- struct{}{}
}
// Responsible for making more requests as necessary
@@ -564,25 +546,26 @@ OUTER_LOOP:
// Send request and wait.
bpr.pool.sendRequest(bpr.height, peer.id)
WAIT_LOOP:
for {
select {
case <-bpr.pool.Quit():
bpr.Stop()
return
case <-bpr.Quit():
return
case <-bpr.redoCh:
bpr.reset()
continue OUTER_LOOP // When peer is removed
case <-bpr.gotBlockCh:
// We got the block, now see if it's good.
select {
case <-bpr.pool.Quit():
bpr.Stop()
return
case <-bpr.Quit():
return
case peerID := <-bpr.redoCh:
if peerID == bpr.peerID {
bpr.reset()
continue OUTER_LOOP
} else {
continue WAIT_LOOP
}
case <-bpr.gotBlockCh:
// We got a block!
// Continue the for-loop and wait til Quit.
continue WAIT_LOOP
case <-bpr.redoCh:
bpr.reset()
continue OUTER_LOOP
}
}
}
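
A minimal, self-contained sketch of the redo-channel pattern used by bpRequester above: a 1-buffered channel keeps redo() non-blocking, and the receiver ignores signals that were issued for a different peer. The peerID and requester types below are local stand-ins for the p2p and pool types:

package main

import "fmt"

type peerID string // stand-in for p2p.ID

type requester struct {
    peerID peerID
    redoCh chan peerID
}

// redo asks the request routine to retry with another peer.
// Non-blocking: if a redo is already pending, the new signal is dropped.
func (r *requester) redo(id peerID) {
    select {
    case r.redoCh <- id:
    default:
    }
}

func main() {
    r := &requester{peerID: "peer-A", redoCh: make(chan peerID, 1)}

    r.redo("peer-B") // stale signal meant for another peer
    r.redo("peer-A") // dropped: a redo is already pending

    select {
    case id := <-r.redoCh:
        if id == r.peerID {
            fmt.Println("reset and pick a new peer")
        } else {
            fmt.Println("redo was for", id, "- keep waiting")
        }
    default:
        fmt.Println("no redo pending")
    }
}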

View File

@@ -1,11 +1,12 @@
package blockchain
import (
"math/rand"
"testing"
"time"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
@@ -16,52 +17,16 @@ func init() {
}
type testPeer struct {
id p2p.ID
height int64
inputChan chan inputData //make sure each peer's data is sequential
id p2p.ID
height int64
}
type inputData struct {
t *testing.T
pool *BlockPool
request BlockRequest
}
func (p testPeer) runInputRoutine() {
go func() {
for input := range p.inputChan {
p.simulateInput(input)
}
}()
}
// Request desired, pretend like we got the block immediately.
func (p testPeer) simulateInput(input inputData) {
block := &types.Block{Header: types.Header{Height: input.request.Height}}
input.pool.AddBlock(input.request.PeerID, block, 123)
input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}
type testPeers map[p2p.ID]testPeer
func (ps testPeers) start() {
for _, v := range ps {
v.runInputRoutine()
}
}
func (ps testPeers) stop() {
for _, v := range ps {
close(v.inputChan)
}
}
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
peers := make(map[p2p.ID]testPeer, numPeers)
for i := 0; i < numPeers; i++ {
peerID := p2p.ID(cmn.RandStr(12))
height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
peers[peerID] = testPeer{peerID, height, make(chan inputData, 10)}
height := minHeight + rand.Int63n(maxHeight-minHeight)
peers[peerID] = testPeer{peerID, height}
}
return peers
}
@@ -81,9 +46,6 @@ func TestBasic(t *testing.T) {
defer pool.Stop()
peers.start()
defer peers.stop()
// Introduce each peer.
go func() {
for _, peer := range peers {
@@ -116,8 +78,12 @@ func TestBasic(t *testing.T) {
if request.Height == 300 {
return // Done!
}
peers[request.PeerID].inputChan <- inputData{t, pool, request}
// Request desired, pretend like we got the block immediately.
go func() {
block := &types.Block{Header: &types.Header{Height: request.Height}}
pool.AddBlock(request.PeerID, block, 123)
t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
}()
}
}
}
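
A minimal sketch of the per-peer input channel idea in the test above ("make sure each peer's data is sequential"): every peer drains its own channel on its own goroutine, so one peer's simulated responses stay ordered while peers run concurrently. The peer type and integer payloads are illustrative stand-ins:

package main

import (
    "fmt"
    "sync"
)

type peer struct {
    id    string
    input chan int // requested heights, served in order
}

// run drains the peer's input channel on a dedicated goroutine,
// mirroring testPeer.runInputRoutine above.
func (p peer) run(wg *sync.WaitGroup) {
    go func() {
        defer wg.Done()
        for h := range p.input {
            fmt.Printf("peer %s served block at height %d\n", p.id, h)
        }
    }()
}

func main() {
    var wg sync.WaitGroup
    peers := []peer{
        {id: "A", input: make(chan int, 10)},
        {id: "B", input: make(chan int, 10)},
    }
    for _, p := range peers {
        wg.Add(1)
        p.run(&wg)
    }
    for h := 1; h <= 4; h++ {
        peers[h%2].input <- h // route each request to one of the peers
    }
    for _, p := range peers {
        close(p.input) // closing the channel ends that peer's routine
    }
    wg.Wait()
}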

View File

@@ -1,26 +1,23 @@
package blockchain
import (
"errors"
"fmt"
"reflect"
"time"
amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 10
trySyncIntervalMS = 50
// stop syncing when last block's time is
// within this much of the system time.
// stopSyncingDurationMinutes = 10
@@ -78,9 +75,8 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
store.Height()))
}
requestsCh := make(chan BlockRequest, maxTotalRequesters)
const capacity = 1000 // must be bigger than peers count
const capacity = 1000 // must be bigger than peers count
requestsCh := make(chan BlockRequest, capacity)
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
pool := NewBlockPool(
@@ -110,6 +106,9 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
// OnStart implements cmn.Service.
func (bcR *BlockchainReactor) OnStart() error {
if err := bcR.BaseReactor.OnStart(); err != nil {
return err
}
if bcR.fastSync {
err := bcR.pool.Start()
if err != nil {
@@ -122,6 +121,7 @@ func (bcR *BlockchainReactor) OnStart() error {
// OnStop implements cmn.Service.
func (bcR *BlockchainReactor) OnStop() {
bcR.BaseReactor.OnStop()
bcR.pool.Stop()
}
@@ -174,19 +174,13 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
msg, err := DecodeMessage(msgBytes)
if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
bcR.Switch.StopPeerForError(src, err)
return
}
if err = msg.ValidateBasic(); err != nil {
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
bcR.Switch.StopPeerForError(src, err)
return
}
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
@@ -195,6 +189,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
// Unfortunately not queued since the queue is full.
}
case *bcBlockResponseMessage:
// Got a block.
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
case *bcStatusRequestMessage:
// Send peer our state.
@@ -207,12 +202,13 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.ID(), msg.Height)
default:
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
func (bcR *BlockchainReactor) poolRoutine() {
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
@@ -227,8 +223,6 @@ func (bcR *BlockchainReactor) poolRoutine() {
lastHundred := time.Now()
lastRate := 0.0
didProcessCh := make(chan struct{}, 1)
FOR_LOOP:
for {
select {
@@ -244,17 +238,14 @@ FOR_LOOP:
// The pool handles timeouts, just let it go.
continue FOR_LOOP
}
case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
@@ -264,94 +255,65 @@ FOR_LOOP:
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
bcR.pool.Stop()
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(state, blocksSynced)
} else {
// should only happen during testing
}
conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
conR.SwitchToConsensus(state, blocksSynced)
break FOR_LOOP
}
case <-trySyncTicker.C: // chan time
select {
case didProcessCh <- struct{}{}:
default:
}
case <-didProcessCh:
// NOTE: It is a subtle mistake to process more than a single block
// at a time (e.g. 10) here, because we only TrySend 1 request per
// loop. The ratio mismatch can result in starving of blocks, a
// sudden burst of requests and responses, and repeat.
// Consequently, it is better to split these routines rather than
// coupling them as it's written here. TODO uncouple from request
// routine.
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
continue FOR_LOOP
} else {
// Try again quickly next loop.
didProcessCh <- struct{}{}
}
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
// This loop can be slow as long as it's doing syncing work.
SYNC_LOOP:
for i := 0; i < 10; i++ {
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
break SYNC_LOOP
}
peerID2 := bcR.pool.RedoRequest(second.Height)
peer2 := bcR.Switch.Peers().Get(peerID2)
if peer2 != nil && peer2 != peer {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer2, fmt.Errorf("BlockchainReactor validation error: %v", err))
}
continue FOR_LOOP
} else {
bcR.pool.PopRequest()
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
}
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
}
}
continue FOR_LOOP
case <-bcR.Quit():
break FOR_LOOP
}
@@ -369,24 +331,28 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
// Messages
// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface {
ValidateBasic() error
}
type BlockchainMessage interface{}
func RegisterBlockchainMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
}
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
// DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
if err != nil {
err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
}
return
}
@@ -396,32 +362,16 @@ type bcBlockRequestMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcBlockRequestMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (m *bcBlockRequestMessage) String() string {
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
}
type bcNoBlockResponseMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcNoBlockResponseMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (brm *bcNoBlockResponseMessage) String() string {
return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height)
}
//-------------------------------------
@@ -430,17 +380,8 @@ type bcBlockResponseMessage struct {
Block *types.Block
}
// ValidateBasic performs basic validation.
func (m *bcBlockResponseMessage) ValidateBasic() error {
if err := m.Block.ValidateBasic(); err != nil {
return err
}
return nil
}
func (m *bcBlockResponseMessage) String() string {
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
@@ -449,16 +390,8 @@ type bcStatusRequestMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcStatusRequestMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (m *bcStatusRequestMessage) String() string {
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
}
//-------------------------------------
@@ -467,14 +400,6 @@ type bcStatusResponseMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcStatusResponseMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (m *bcStatusResponseMessage) String() string {
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
}
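
A minimal, self-contained sketch of the trySyncTicker/didProcessCh pattern in poolRoutine above: the ticker only performs a non-blocking send into a 1-buffered channel, and the handler processes a single item per wake-up, re-arming itself while work remains ("try again quickly next loop"). The work slice is a stand-in for the block pool:

package main

import (
    "fmt"
    "time"
)

func main() {
    work := []string{"block 1", "block 2", "block 3"} // stand-in for peeked blocks

    trySyncTicker := time.NewTicker(10 * time.Millisecond)
    defer trySyncTicker.Stop()
    didProcessCh := make(chan struct{}, 1)

    for {
        select {
        case <-trySyncTicker.C:
            select {
            case didProcessCh <- struct{}{}: // schedule one processing pass
            default: // a pass is already scheduled; don't pile up
            }
        case <-didProcessCh:
            if len(work) == 0 {
                fmt.Println("nothing left to process")
                return
            }
            fmt.Println("processed", work[0]) // handle exactly one item per wake-up
            work = work[1:]
            if len(work) > 0 {
                didProcessCh <- struct{}{} // try again quickly next loop
            }
        }
    }
}

Processing one block per pass keeps block handling in step with the single request sent per loop, which is the starvation issue the NOTE in poolRoutine warns about.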

View File

@@ -1,151 +1,71 @@
package blockchain
import (
"sort"
"testing"
"time"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
var config *cfg.Config
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
}
func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
addr := privVal.GetAddress()
idx, _ := valset.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: idx,
Height: header.Height,
Round: 1,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: blockID,
}
privVal.SignVote(header.ChainID, vote)
return vote
}
type BlockchainReactorPair struct {
reactor *BlockchainReactor
app proxy.AppConns
}
func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {
if len(privVals) != 1 {
panic("only support one validator")
}
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
if err != nil {
panic(cmn.ErrorWrap(err, "error start app"))
}
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
blockStore := NewBlockStore(blockDB)
state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
if err != nil {
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
}
return state, blockStore
}
// Make the BlockchainReactor itself.
// NOTE we have to create and commit the blocks first because
// pool.height is determined from the store.
func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainReactor {
state, blockStore := makeStateAndBlockStore(logger)
// Make the blockchainReactor itself
fastSync := true
blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(),
sm.MockMempool{}, sm.MockEvidencePool{})
// let's add some blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := &types.Commit{}
if blockHeight > 1 {
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
lastCommit = &types.Commit{Precommits: []*types.Vote{vote}, BlockID: lastBlockMeta.BlockID}
}
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{thisBlock.Hash(), thisParts.Header()}
state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
if err != nil {
panic(cmn.ErrorWrap(err, "error apply block"))
}
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}
var nilApp proxy.AppConnConsensus
blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp,
types.MockMempool{}, types.MockEvidencePool{})
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))
return BlockchainReactorPair{bcReactor, proxyApp}
// Next: we need to set a switch in order for peers to be added in
bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig())
// Lastly: let's add some blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
firstBlock := makeBlock(blockHeight, state)
secondBlock := makeBlock(blockHeight+1, state)
firstParts := firstBlock.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes)
blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit)
}
return bcReactor
}
func TestNoBlockResponse(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(20)
maxBlockHeight := int64(65)
bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight)
bcr.Start()
defer bcr.Stop()
reactorPairs := make([]BlockchainReactorPair, 2)
// Add some peers in
peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12)))
bcr.AddPeer(peer)
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
return s
}, p2p.Connect2Switches)
defer func() {
for _, r := range reactorPairs {
r.reactor.Stop()
r.app.Stop()
}
}()
chID := byte(0x01)
tests := []struct {
height int64
@@ -157,100 +77,72 @@ func TestNoBlockResponse(t *testing.T) {
{100, false},
}
for {
if reactorPairs[1].reactor.pool.IsCaughtUp() {
break
}
time.Sleep(10 * time.Millisecond)
}
assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height())
// receive a request message from peer,
// wait for our response to be received on the peer
for _, tt := range tests {
block := reactorPairs[1].reactor.store.LoadBlock(tt.height)
reqBlockMsg := &bcBlockRequestMessage{tt.height}
reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg)
bcr.Receive(chID, peer, reqBlockBytes)
msg := peer.lastBlockchainMessage()
if tt.existent {
assert.True(t, block != nil)
if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok {
t.Fatalf("Expected to receive a block response for height %d", tt.height)
} else if blockMsg.Block.Height != tt.height {
t.Fatalf("Expected response to be for height %d, got %d", tt.height, blockMsg.Block.Height)
}
} else {
assert.True(t, block == nil)
if noBlockMsg, ok := msg.(*bcNoBlockResponseMessage); !ok {
t.Fatalf("Expected to receive a no block response for height %d", tt.height)
} else if noBlockMsg.Height != tt.height {
t.Fatalf("Expected response to be for height %d, got %d", tt.height, noBlockMsg.Height)
}
}
}
}
/*
// NOTE: This is too hard to test without
// an easy way to add test peer to switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
config = cfg.ResetTestRoot("blockchain_reactor_test")
genDoc, privVals := randGenesisDoc(1, false, 30)
maxBlockHeight := int64(20)
maxBlockHeight := int64(148)
bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight)
bcr.Start()
defer bcr.Stop()
otherChain := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
defer func() {
otherChain.reactor.Stop()
otherChain.app.Stop()
}()
// Add some peers in
peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12)))
reactorPairs := make([]BlockchainReactorPair, 4)
// XXX: This doesn't add the peer to anything,
// so it's hard to check that it's later removed
bcr.AddPeer(peer)
assert.True(t, bcr.Switch.Peers().Size() > 0)
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
reactorPairs[2] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
reactorPairs[3] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
return s
}, p2p.Connect2Switches)
defer func() {
for _, r := range reactorPairs {
r.reactor.Stop()
r.app.Stop()
}
}()
// send a bad block from the peer
// default blocks already dont have commits, so should fail
block := bcr.store.LoadBlock(3)
msg := &bcBlockResponseMessage{Block: block}
peer.Send(BlockchainChannel, struct{ BlockchainMessage }{msg})
ticker := time.NewTicker(time.Millisecond * 10)
timer := time.NewTimer(time.Second * 2)
LOOP:
for {
if reactorPairs[3].reactor.pool.IsCaughtUp() {
break
select {
case <-ticker.C:
if bcr.Switch.Peers().Size() == 0 {
break LOOP
}
case <-timer.C:
t.Fatal("Timed out waiting to disconnect peer")
}
time.Sleep(1 * time.Second)
}
//at this time, reactors[0-3] is the newest
assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())
//mark reactorPairs[3] is an invalid peer
reactorPairs[3].reactor.store = otherChain.reactor.store
lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
reactorPairs = append(reactorPairs, lastReactorPair)
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
return s
}, p2p.Connect2Switches)...)
for i := 0; i < len(reactorPairs)-1; i++ {
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
}
for {
if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
break
}
time.Sleep(1 * time.Second)
}
assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
}
*/
//----------------------------------------------
// utility funcs
@@ -262,41 +154,53 @@ func makeTxs(height int64) (txs []types.Tx) {
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
func makeBlock(height int64, state sm.State) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit))
return block
}
type testApp struct {
abci.BaseApplication
// The Test peer
type bcrTestPeer struct {
cmn.BaseService
id p2p.ID
ch chan interface{}
}
var _ abci.Application = (*testApp)(nil)
var _ p2p.Peer = (*bcrTestPeer)(nil)
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
return abci.ResponseInfo{}
func newbcrTestPeer(id p2p.ID) *bcrTestPeer {
bcr := &bcrTestPeer{
id: id,
ch: make(chan interface{}, 2),
}
bcr.BaseService = *cmn.NewBaseService(nil, "bcrTestPeer", bcr)
return bcr
}
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
return abci.ResponseBeginBlock{}
func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch }
func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
var msg BlockchainMessage
err := cdc.UnmarshalBinaryBare(msgBytes, &msg)
if err != nil {
panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage"))
}
if _, ok := msg.(*bcStatusResponseMessage); ok {
// Discard status response messages since they skew our results
// We only want to deal with:
// + bcBlockResponseMessage
// + bcNoBlockResponseMessage
} else {
tp.ch <- msg
}
return true
}
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
return abci.ResponseEndBlock{}
}
func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}}
}
func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx {
return abci.ResponseCheckTx{}
}
func (app *testApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{}
}
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
return
}
func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
func (tp *bcrTestPeer) IsOutbound() bool { return false }
func (tp *bcrTestPeer) IsPersistent() bool { return true }
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
func (tp *bcrTestPeer) Set(string, interface{}) {}

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"sync"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tendermint/types"
)
@@ -63,7 +63,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
part := bs.LoadBlockPart(height, i)
buf = append(buf, part.Bytes...)
}
err := cdc.UnmarshalBinaryLengthPrefixed(buf, block)
err := cdc.UnmarshalBinary(buf, block)
if err != nil {
// NOTE: The existence of meta should imply the existence of the
// block. So, make sure meta is only saved after blocks are saved.
@@ -148,10 +148,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
}
height := block.Height
if g, w := height, bs.Height()+1; g != w {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
}
if !blockParts.IsComplete() {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
}
// Save block meta
@@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := cdc.MarshalJSON(bsj)
if err != nil {
cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}
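
A minimal sketch of how the store persists its state as JSON under a fixed key, mirroring BlockStoreStateJSON.Save above; the blockStoreState type, the key name, and the tmlibs/db import path are assumptions for illustration:

package main

import (
    "fmt"

    amino "github.com/tendermint/go-amino"
    dbm "github.com/tendermint/tmlibs/db"
)

type blockStoreState struct {
    Height int64 `json:"height"`
}

var cdc = amino.NewCodec()

func main() {
    db := dbm.NewMemDB()

    // Save: marshal to JSON and write synchronously under a fixed key.
    bz, err := cdc.MarshalJSON(blockStoreState{Height: 10000})
    if err != nil {
        panic(err)
    }
    db.SetSync([]byte("blockStore"), bz)

    // Load: read the key back and unmarshal into the same struct.
    var bss blockStoreState
    if err := cdc.UnmarshalJSON(db.Get([]byte("blockStore")), &bss); err != nil {
        panic(err)
    }
    fmt.Println("restored height:", bss.Height)
}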

View File

@@ -6,33 +6,16 @@ import (
"runtime/debug"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
config := cfg.ResetTestRoot("blockchain_reactor_test")
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
if err != nil {
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
}
return state, NewBlockStore(blockDB)
}
func TestLoadBlockStoreStateJSON(t *testing.T) {
db := db.NewMemDB()
@@ -46,7 +29,7 @@ func TestLoadBlockStoreStateJSON(t *testing.T) {
func TestNewBlockStore(t *testing.T) {
db := db.NewMemDB()
db.Set(blockStoreKey, []byte(`{"height": "10000"}`))
db.Set(blockStoreKey, []byte(`{"height": 10000}`))
bs := NewBlockStore(db)
require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")
@@ -66,7 +49,7 @@ func TestNewBlockStore(t *testing.T) {
return nil, nil
})
require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data)
assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data)
assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data)
}
db.Set(blockStoreKey, nil)
@@ -82,12 +65,12 @@ func freshBlockStore() (*BlockStore, db.DB) {
var (
state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
block = makeBlock(1, state, new(types.Commit))
block = makeBlock(1, state)
partSet = block.MakePartSet(2)
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
Timestamp: time.Now().UTC()}}}
)
// TODO: This test should be simplified ...
@@ -105,22 +88,22 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
}
// save a block
block := makeBlock(bs.Height()+1, state, new(types.Commit))
block := makeBlock(bs.Height()+1, state)
validPartSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
Timestamp: time.Now().UTC()}}}
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
uncontiguousPartSet.AddPart(part2)
uncontiguousPartSet.AddPart(part2, false)
header1 := types.Header{
Height: 1,
NumTxs: 100,
ChainID: "block_test",
Time: tmtime.Now(),
Time: time.Now(),
}
header2 := header1
header2.Height = 4
@@ -128,7 +111,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
// End of setup, test data
commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
Timestamp: time.Now().UTC()}}}
tuples := []struct {
block *types.Block
parts *types.PartSet
@@ -143,7 +126,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
eraseSeenCommitInDB bool
}{
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
},
@@ -154,35 +137,35 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(header2, commitAtH10),
block: newBlock(&header2, commitAtH10),
parts: uncontiguousPartSet,
wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts
},
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: incompletePartSet,
wantPanic: "only save complete block", // incomplete parts
},
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
corruptCommitInDB: true, // Corrupt the DB's commit entry
wantPanic: "unmarshal to types.Commit failed",
wantPanic: "Error reading block commit",
},
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
wantPanic: "unmarshal to types.BlockMeta failed",
wantPanic: "Error reading block",
corruptBlockInDB: true, // Corrupt the DB's block entry
},
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
@@ -191,16 +174,16 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
corruptSeenCommitInDB: true,
wantPanic: "unmarshal to types.Commit failed",
wantPanic: "Error reading block seen commit",
},
{
block: newBlock(header1, commitAtH10),
block: newBlock(&header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
@@ -255,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
if subStr := tuple.wantPanic; subStr != "" {
if panicErr == nil {
t.Errorf("#%d: want a non-nil panic", i)
} else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) {
} else if got := panicErr.Error(); !strings.Contains(got, subStr) {
t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
}
continue
@@ -304,7 +287,7 @@ func TestLoadBlockPart(t *testing.T) {
db.Set(calcBlockPartKey(height, index), []byte("Tendermint"))
res, _, panicErr = doFn(loadPart)
require.NotNil(t, panicErr, "expecting a non-nil panic")
require.Contains(t, panicErr.Error(), "unmarshal to types.Part failed")
require.Contains(t, panicErr.Error(), "Error reading block part")
// 3. A good block serialized and saved to the DB should be retrievable
db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
@@ -333,7 +316,7 @@ func TestLoadBlockMeta(t *testing.T) {
db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta"))
res, _, panicErr = doFn(loadMeta)
require.NotNil(t, panicErr, "expecting a non-nil panic")
require.Contains(t, panicErr.Error(), "unmarshal to types.BlockMeta")
require.Contains(t, panicErr.Error(), "Error reading block meta")
// 3. A good blockMeta serialized and saved to the DB should be retrievable
meta := &types.BlockMeta{}
@@ -348,11 +331,11 @@ func TestLoadBlockMeta(t *testing.T) {
func TestBlockFetchAtHeight(t *testing.T) {
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
block := makeBlock(bs.Height()+1, state, new(types.Commit))
block := makeBlock(bs.Height()+1, state)
partSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: tmtime.Now()}}}
Timestamp: time.Now().UTC()}}}
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
@@ -392,7 +375,7 @@ func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr
return res, err, panicErr
}
func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block {
func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
return &types.Block{
Header: hdr,
LastCommit: lastCommit,

View File

@@ -1,13 +1,13 @@
package blockchain
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
RegisterBlockchainMessages(cdc)
types.RegisterBlockAmino(cdc)
crypto.RegisterAmino(cdc)
}
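
A minimal, self-contained sketch of the codec registration pattern in wire.go above: an interface and named concrete types are registered with a shared amino codec, after which a message can be marshaled from its concrete type and unmarshaled back into the interface, as the reactor and tests above do. ExampleMessage, exampleRequest, and the registration name are illustrative only:

package main

import (
    "fmt"

    amino "github.com/tendermint/go-amino"
)

type ExampleMessage interface{}

type exampleRequest struct {
    Height int64
}

var cdc = amino.NewCodec()

func init() {
    // Register the interface and each concrete message type under a unique name.
    cdc.RegisterInterface((*ExampleMessage)(nil), nil)
    cdc.RegisterConcrete(&exampleRequest{}, "example/Request", nil)
}

func main() {
    var req ExampleMessage = &exampleRequest{Height: 7}

    bz, err := cdc.MarshalBinaryBare(req)
    if err != nil {
        panic(err)
    }

    var msg ExampleMessage
    if err := cdc.UnmarshalBinaryBare(bz, &msg); err != nil {
        panic(err)
    }
    fmt.Printf("decoded %T with height %d\n", msg, msg.(*exampleRequest).Height)
}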

Some files were not shown because too many files have changed in this diff.