Mirror of https://github.com/fluencelabs/tendermint, synced 2025-07-15 20:41:37 +00:00

Compare commits

210 commits
SHA1
764091dfbb
ab1fa4db8c
9123e63a33
ba7ca4f372
1e25e9f58f
9b4660b458
16a5c6b2c8
f347143b3d
c1952523cd
1afe0cb45f
7075a8ed81
d83ca54b36
e0aead0be2
94b6dd65ee
c147b41013
3a55339114
2dd7030579
e7a12f8e38
0525e8ed5c
1d091bafe9
814ef37f75
56341de5eb
3c589dac19
618fce8d32
ce0c638005
15d3d12098
76b4a45f98
535fc6cd63
3308ac7d83
a1fd312bb1
b096651e10
44f939c841
d68cdce2d5
43fdc4a1ce
cb2f2b94ee
4722410e5e
55b4bfa1fe
1532879c64
12d92fd5db
bae0bc02a6
99974ddc8b
f30a9752e2
c90985d309
bd222d6e3c
0c01b0ded9
e5fb681615
c9698e4848
0e7694ca94
f4e6cf4439
e4921733df
69a449a073
30328548f7
57f3592411
1c24031dd2
3d47ef9d74
6488894210
2f0d31b4b6
b2376058a1
e981ff4e7d
b126ca0606
40b08f2494
8211fa6ce4
706dd1d6c5
6bc3b8dc6d
faf23aa0d4
18b3821e06
1bd700ee52
38783e7fa1
dcbb35089f
f33cc3fb3b
1b3766d802
da8b043612
ed42f70248
0204d3c6a6
81f91aebc2
de6bba4609
c3a3cc76d8
74ff489e63
55b47bcb8e
bcd8712ec3
b212aa0db3
8390e88e27
bc95700dcf
12c6594c9b
d800a51da4
73502fab0d
2425711734
6be5bda8c9
2abcde03ad
69ef1da58c
8df32cd540
3aa06d0851
e611e97339
3000c8b349
6f88d04ac4
1c16ce8cf0
070728429b
918f76f96a
4202c4bf20
dc436e72f9
b74a97a4f6
2ef695da97
db437e7a45
538a50c9e8
0fe53dc5cf
8b80f8ee05
3e7c7f51fa
2f9063c1d6
65496ace20
64c7a0ad0d
5046d5b181
6f8c91b651
e0db20c0cf
e1e2c1c740
655b6300f5
d7f6c0775a
a3d863f83b
deb4c428fd
b8e94b1d30
a151216e5e
e09950d3fb
3206e101f5
c6a648fad7
904eeddf36
07597dfd45
4360c360a4
befd8b0cb2
3f90fcae48
8ec1839f5d
fb9735ef46
138de19e1e
d3ae920bd0
f37f56d4f1
a0e4253edc
c3d5634efa
c05b2c5c59
f763a9ef56
81e6df0d57
d83fc02597
95c8bb4252
c1729addce
3e3b034252
9d0c7f6ec7
c5a803a146
5f55ed2a40
7afcf92539
57da2e4af5
f837252ff1
fd128c7180
ea4b60a602
b73a6905a1
655d829314
1173a85c85
40791d886d
7221887330
3c5a2f55c2
0b098a2eee
1765d3c866
1fedf5b332
f94836783c
2aecb2a4a3
bf1bceec87
01a3ac50af
3ff9355e7b
94ac890859
3d3d8b5b7b
1788a68b1c
480f44f16c
9a089482dc
fc31b463b1
539f8f91bd
830e84adc4
40f2a128b8
1786890e32
435d1e3da7
642a24dc9c
eab4e1cfa1
a9d8039082
2113b6f4bb
7d493774c7
de0fc87c1c
f0871e4f5e
1b37c8affd
3e29f2bdc2
3f4af438c5
2cb13bf852
0098387fbf
7e07919d9d
71baad59df
9365d33243
3c18d841fa
7dcb567e53
7a424e6b12
302bbc5dbd
97a51c130f
00d53714f9
a07063f119
9393be7a49
9d331715c0
0665bc6e4b
35d4cca8bb
b38748ad1a
22979d9365
be3592adf6
a5f6069967
c481d2bcbb
a1649f774e
22155df759
7f31ec3398
121714c040
.codecov.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
#
# This codecov.yml is the default configuration for
# all repositories on Codecov. You may adjust the settings
# below in your own codecov.yml in your repository.
#
coverage:
  precision: 2
  round: down
  range: 70...100

  status:
    # Learn more at https://codecov.io/docs#yaml_default_commit_status
    project:
      default:
        threshold: 1% # allow this much decrease on project
    changes: false

comment:
  layout: "header, diff"
  behavior: default # update if exists else create new
.gitignore (vendored)

@@ -10,3 +10,5 @@ rpc/test/.tendermint
 remote_dump
 .revision
 vendor
+.vagrant
+test/p2p/data/
@@ -1,5 +1,16 @@
# Docker

Tendermint uses docker for deployment of testnets via the [mintnet](https://github.com/tendermint/mintnet) tool.

For faster development iterations (i.e. to avoid docker builds),
the dockerfile just sets up the OS, and tendermint is fetched/installed at runtime.

For the deterministic docker builds used in testing, see the [tests directory](https://github.com/tendermint/tendermint/tree/master/test).

# Build and run a docker image and container

These are notes for the dev team.

```
# Build base Docker image
# Make sure ./run.sh exists.
```
INSTALL.md (new file, 57 lines)

@@ -0,0 +1,57 @@
# Install Go

[Install Go, set the `GOPATH`, and put `GOPATH/bin` on your `PATH`](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH).

# Install Tendermint

You should be able to install the latest with a simple `go get -u github.com/tendermint/tendermint/cmd/tendermint`.
The `-u` makes sure all dependencies are updated as well.

Run `tendermint version` and `tendermint --help`.

If the install failed, see [vendored dependencies below](#vendored-dependencies).

To start a one-node blockchain with a simple in-process application:

```
tendermint init
tendermint node --proxy_app=dummy
```

See the [application developers guide](https://github.com/tendermint/tendermint/wiki/Application-Developers) for more details on building and running applications.

## Vendored dependencies

If the `go get` failed, updated dependencies may have broken the build.
Install the correct version of each dependency using `glide`.

First, install `glide`:

```
go get github.com/Masterminds/glide
```

Now, fetch the dependencies and install them with `glide` and `go`:

```
cd $GOPATH/src/github.com/tendermint/tendermint
glide install
go install ./cmd/tendermint
```

Sometimes `glide install` is painfully slow. Hang in there, champ.

The latest Tendermint Core version is now installed. Check by running `tendermint version`.

## Troubleshooting

If `go get` failing bothers you, fetch the code using `git`:

```
mkdir -p $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint
glide install
go install ./cmd/tendermint
```
@@ -1,21 +0,0 @@ (file removed)
1. Fork github.com/tendermint/tendermint.
2. Run "make"; it should install the daemon, which we named "tendermint".
3. Run "tendermint gen_account". Save the address, pub_key bytes, and priv_key bytes.
   This is your developer key for controlling the cloud nodes.
4. Also run "tendermint gen_validator" 5 times, once for each cloud node. Save the output.
5. Create a directory ~/.debora/ and copy cmd/debora/default.cfg into ~/.debora/default.cfg.
   Copy the priv_key bytes from step 4 into ~/.debora/default.cfg where it says so.
   Change the list of hosts in ~/.debora/default.cfg to your own set of 5 cloud nodes.
6. Replace cmd/barak/seed's pubkey with the pub_key bytes from step 3.
7. Update config/tendermint/config.go's genesis with validator pubkeys from step 4.
   Give each of your nodes the same amount of voting power.
   Set up the accounts however you want.
8. On each cloud node, follow the instructions here: https://github.com/tendermint/tendermint/tree/master/INSTALL
   Create tmuser, install go, and also install 'barak'.
   Then run `barak -config="cmd/barak/seed"`.
   You don't need to start the node at this time.
9. Now you can run "debora list" on your development machine and post commands to each cloud node.
10. Run scripts/unsafe_upgrade_barak.sh to test that barak is running.
    The old barak you started in step 8 should now have quit.
    A new instance of barak should be running. Check with `ps -ef | grep "barak"`.
11. Run scripts/unsafe_restart_net.sh to start your new testnet.
@@ -1,30 +0,0 @@ (file removed)
NOTE: Only Ubuntu 14.04 64bit is supported at this time.

### Server setup / create `tmuser`

Secure the server, install dependencies, and create a new user `tmuser`:

    curl -L https://raw.githubusercontent.com/tendermint/tendermint/master/INSTALL/install_env.sh > install_env.sh
    source install_env.sh
    cd /home/tmuser

### Install Go as `tmuser`

Don't use `apt-get install golang`; it's still on an old version.

    curl -L https://raw.githubusercontent.com/tendermint/tendermint/master/INSTALL/install_golang.sh > install_golang.sh
    source install_golang.sh

### Run Barak

WARNING: THIS STEP WILL GIVE CONTROL OF THE CURRENT USER TO THE DEV TEAM.

    go get -u github.com/tendermint/tendermint/cmd/barak
    nohup barak -config="$GOPATH/src/github.com/tendermint/tendermint/cmd/barak/seed" &

### Install/Update MintDB

    go get -u github.com/tendermint/tendermint/cmd/tendermint
    mkdir -p ~/.tendermint
    cp $GOPATH/src/github.com/tendermint/tendermint/config/tendermint/genesis.json ~/.tendermint/
    tendermint node --seeds="goldenalchemist.chaintest.net:46656"
@@ -1,63 +0,0 @@ (file removed — the `install_env.sh` script referenced above)
#!/bin/bash
# Run this as root user.
# This part is for hardening the server and setting up a user account.

if [ `whoami` != "root" ];
then
    echo "You must run this script as root"
    exit 1
fi

USER="tmuser"
OPEN_PORTS=(46656 46657 46658 46659 46660 46661 46662 46663 46664 46665 46666 46667 46668 46669 46670 46671)
SSH_PORT=22
WHITELIST=()

# update and upgrade
apt-get update -y
apt-get upgrade -y

# fail2ban for monitoring logins
apt-get install -y fail2ban

# set up the network time daemon
apt-get install -y ntp

# install dependencies
apt-get install -y make screen gcc git mercurial libc6-dev pkg-config libgmp-dev

# set up firewall
echo "ENABLE FIREWALL ..."
set -x
# whitelist ssh access
for ip in "${WHITELIST[@]}"; do
    ufw allow from $ip to any port $SSH_PORT
done
if [ ${#WHITELIST[@]} -eq 0 ]; then
    ufw allow $SSH_PORT
fi
# open ports
for port in "${OPEN_PORTS[@]}"; do
    ufw allow $port
done
# apply
ufw --force enable
set +x
# set up firewall END

# watch the logs and have them emailed to me
# apt-get install -y logwatch
# echo "/usr/sbin/logwatch --output mail --mailto $ADMIN_EMAIL --detail high" >> /etc/cron.daily/00logwatch

# set up user account
echo "CREATE USER $USER ..."
useradd $USER -d /home/$USER
# This user should not have root access.
# usermod -aG sudo $USER
mkdir /home/$USER
cp /etc/skel/.bashrc /home/$USER
cp /etc/skel/.profile /home/$USER
chown -R $USER:$USER /home/$USER

echo "Done setting env. Switching to $USER..."
su $USER
@@ -1,29 +0,0 @@ (file removed — the `install_golang.sh` script referenced above)
#!/bin/bash
# Run this as tmuser user
# This part is for installing go

if [ `whoami` == "root" ];
then
    echo "You should not run this script as root"
    exit 1
fi

USER=`whoami`
PWD=`pwd`

# get dependencies
# sudo apt-get install -y make screen gcc git mercurial libc6-dev pkg-config libgmp-dev

# install golang
cd /home/$USER
mkdir gocode
wget https://storage.googleapis.com/golang/go1.4.2.src.tar.gz
tar -xzvf go*.tar.gz
cd go/src
./make.bash
mkdir -p /home/$USER/go/src
echo 'export GOROOT=/home/$USER/go' >> /home/$USER/.bashrc
echo 'export GOPATH=/home/$USER/gocode' >> /home/$USER/.bashrc
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/$USER/.bashrc
source /home/$USER/.bashrc
cd $PWD
Makefile

@@ -12,19 +12,19 @@ NOVENDOR = go list github.com/tendermint/tendermint/... | grep -v /vendor/
 install: get_deps
 	go install github.com/tendermint/tendermint/cmd/tendermint

-build:
+build:
 	go build -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint

-build_race:
+build_race:
 	go build -race -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint

 test: build
 	go test `${NOVENDOR}`

 test_race: build
 	go test -race `${NOVENDOR}`

-test_integrations:
+test_integrations:
 	bash ./test/test.sh

 test100: build

@@ -48,6 +48,7 @@ get_deps:

 get_vendor_deps:
+	go get github.com/Masterminds/glide
 	rm -rf vendor/
 	glide install

 update_deps:
README.md

@@ -1,43 +1,65 @@
 # Tendermint
 Simple, Secure, Scalable Blockchain Platform

-[CircleCI](https://circleci.com/gh/tendermint/tendermint)
-[codecov](https://codecov.io/gh/tendermint/tendermint)
+[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
+[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
+Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
+
+[latest release](https://github.com/tendermint/tendermint/releases/latest)
+[API reference](https://godoc.org/github.com/tendermint/tendermint)
+[chat](http://forum.tendermint.com:3000/)
+[license](https://github.com/tendermint/tendermint/blob/master/LICENSE)
+
+Branch    | Tests | Coverage | Report Card
+----------|-------|----------|-------------
+develop   | [CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [codecov](https://codecov.io/gh/tendermint/tendermint) | [report](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/develop)
+master    | [CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master) | [codecov](https://codecov.io/gh/tendermint/tendermint) | [report](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/master)
+
+_NOTE: This is yet pre-alpha non-production-quality software._

 Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language,
-and replicates it on many machines.
-See the [application developers guide](https://github.com/tendermint/tendermint/wiki/Application-Developers) to get started.
+and securely replicates it on many machines.

-## Contributing
+For more background, see the [introduction](https://tendermint.com/intro).

-Yay open source! Please see our [contributing guidelines](https://github.com/tendermint/tendermint/wiki/Contributing).
-
-## Resources
-
-### Tendermint Core
-
-- [Introduction](https://github.com/tendermint/tendermint/wiki/Introduction)
-- [Validators](https://github.com/tendermint/tendermint/wiki/Validators)
-- [Byzantine Consensus Algorithm](https://github.com/tendermint/tendermint/wiki/Byzantine-Consensus-Algorithm)
-- [Block Structure](https://github.com/tendermint/tendermint/wiki/Block-Structure)
-- [RPC](https://github.com/tendermint/tendermint/wiki/RPC)
-- [Genesis](https://github.com/tendermint/tendermint/wiki/Genesis)
-- [Configuration](https://github.com/tendermint/tendermint/wiki/Configuration)
-- [Light Client Protocol](https://github.com/tendermint/tendermint/wiki/Light-Client-Protocol)
-- [Roadmap for V2](https://github.com/tendermint/tendermint/wiki/Roadmap-for-V2)
-
-### Sub-projects
-
-* [TMSP](http://github.com/tendermint/tmsp)
-* [Mintnet](http://github.com/tendermint/mintnet)
-* [Go-Wire](http://github.com/tendermint/go-wire)
-* [Go-P2P](http://github.com/tendermint/go-p2p)
-* [Go-Merkle](http://github.com/tendermint/go-merkle)
+To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).

 ## Install

 `go get -u github.com/tendermint/tendermint/cmd/tendermint`

-For more details, see the [install guide](https://github.com/tendermint/tendermint/wiki/Installation).
+For more details (or if it fails), see the [install guide](https://tendermint.com/intro/getting-started/install).

+## Contributing
+
+Yay open source! Please see our [contributing guidelines](https://tendermint.com/guides/contributing).
+
+## Resources
+
+### Tendermint Core
+
+- [Introduction](https://tendermint.com/intro)
+- [Docs](https://tendermint.com/docs)
+- [Software using Tendermint](https://tendermint.com/ecosystem)
+
+### Sub-projects
+
+* [ABCI](http://github.com/tendermint/abci)
+* [Mintnet](http://github.com/tendermint/mintnet)
+* [Go-Wire](http://github.com/tendermint/go-wire)
+* [Go-P2P](http://github.com/tendermint/go-p2p)
+* [Go-Merkle](http://github.com/tendermint/go-merkle)
+
+### Applications
+
+* [Ethermint](http://github.com/tendermint/ethermint)
+* [Basecoin](http://github.com/tendermint/basecoin)
+
+### More
+
+* [Tendermint Blog](https://tendermint.com/blog)
+* [Cosmos Blog](https://cosmos.network/blog)
+* [Original Whitepaper (out-of-date)](http://www.the-blockchain.com/docs/Tendermint%20Consensus%20without%20Mining.pdf)
+* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
Vagrantfile (vendored)

@@ -1,25 +1,33 @@
 # -*- mode: ruby -*-
 # vi: set ft=ruby :

-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/trusty64"

-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = "phusion-open-ubuntu-14.04-amd64"
-  config.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vbox.box"
-  # Or, for Ubuntu 12.04:
-
-  config.vm.provider :vmware_fusion do |f, override|
-    override.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vmwarefusion.box"
+  config.vm.provider "virtualbox" do |v|
+    v.memory = 2048
+    v.cpus = 2
   end

-  if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
-    # Install Docker
-    pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
-      "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
-      "apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
-    # Add vagrant user to the docker group
-    pkg_cmd << "usermod -a -G docker vagrant; "
-    config.vm.provision :shell, :inline => pkg_cmd
-  end
+  config.vm.provision "shell", inline: <<-SHELL
+    apt-get update
+    apt-get install -y --no-install-recommends wget curl jq shellcheck bsdmainutils psmisc
+
+    wget -qO- https://get.docker.com/ | sh
+    usermod -a -G docker vagrant
+
+    curl -O https://storage.googleapis.com/golang/go1.7.linux-amd64.tar.gz
+    tar -xvf go1.7.linux-amd64.tar.gz
+    mv go /usr/local
+    echo 'export PATH=$PATH:/usr/local/go/bin' >> /home/vagrant/.profile
+    mkdir -p /home/vagrant/go/bin
+    chown -R vagrant:vagrant /home/vagrant/go
+    echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.profile
+
+    mkdir -p /home/vagrant/go/src/github.com/tendermint
+    ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
+
+    su - vagrant -c 'curl https://glide.sh/get | sh'
+    su - vagrant -c 'cd /vagrant/ && glide install && make test'
+  SHELL
 end
@@ -5,8 +5,8 @@ import (
 	"sync"
 	"time"

-	flow "github.com/tendermint/flowcontrol"
 	. "github.com/tendermint/go-common"
+	flow "github.com/tendermint/go-flowrate/flowrate"
 	"github.com/tendermint/tendermint/types"
 )

@@ -32,7 +32,7 @@ var peerTimeoutSeconds = time.Duration(15) // not const so we can override with
 */

 type BlockPool struct {
-	QuitService
+	BaseService
 	startTime time.Time

 	mtx sync.Mutex

@@ -58,19 +58,19 @@ func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- s
 		requestsCh: requestsCh,
 		timeoutsCh: timeoutsCh,
 	}
-	bp.QuitService = *NewQuitService(log, "BlockPool", bp)
+	bp.BaseService = *NewBaseService(log, "BlockPool", bp)
 	return bp
 }

 func (pool *BlockPool) OnStart() error {
-	pool.QuitService.OnStart()
+	pool.BaseService.OnStart()
 	go pool.makeRequestersRoutine()
 	pool.startTime = time.Now()
 	return nil
 }

 func (pool *BlockPool) OnStop() {
-	pool.QuitService.OnStop()
+	pool.BaseService.OnStop()
 }

 // Run spawns requesters as needed.

@@ -383,7 +383,7 @@ func (peer *bpPeer) onTimeout() {
 //-------------------------------------

 type bpRequester struct {
-	QuitService
+	BaseService
 	pool       *BlockPool
 	height     int
 	gotBlockCh chan struct{}

@@ -404,12 +404,12 @@ func newBPRequester(pool *BlockPool, height int) *bpRequester {
 		peerID: "",
 		block:  nil,
 	}
-	bpr.QuitService = *NewQuitService(nil, "bpRequester", bpr)
+	bpr.BaseService = *NewBaseService(nil, "bpRequester", bpr)
 	return bpr
 }

 func (bpr *bpRequester) OnStart() error {
-	bpr.QuitService.OnStart()
+	bpr.BaseService.OnStart()
 	go bpr.requestRoutine()
 	return nil
 }
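The QuitService-to-BaseService swap above follows one pattern in both BlockPool and bpRequester: embed the service base, and have OnStart call the embedded implementation before spawning the background routine. Below is a minimal, self-contained sketch of that lifecycle pattern; `BaseService` here is a simplified stand-in for go-common's type, not its exact API.

```go
package main

import "fmt"

// Simplified stand-in for go-common's BaseService; the real type also
// guards against double-start and exposes a Quit channel.
type BaseService struct {
	name    string
	running bool
}

func (bs *BaseService) OnStart() error { bs.running = true; return nil }
func (bs *BaseService) OnStop()        { bs.running = false }

// BlockPool mirrors the diff: it embeds the base service and layers
// its own OnStart/OnStop on top.
type BlockPool struct {
	BaseService
	quit chan struct{}
}

func NewBlockPool() *BlockPool {
	bp := &BlockPool{quit: make(chan struct{})}
	bp.BaseService = BaseService{name: "BlockPool"}
	return bp
}

// Call the embedded implementation first, then spawn the routine --
// the same ordering as pool.OnStart in the diff above.
func (bp *BlockPool) OnStart() error {
	if err := bp.BaseService.OnStart(); err != nil {
		return err
	}
	go bp.makeRequestersRoutine()
	return nil
}

func (bp *BlockPool) OnStop() {
	bp.BaseService.OnStop()
	close(bp.quit) // release the background routine
}

func (bp *BlockPool) makeRequestersRoutine() {
	<-bp.quit // placeholder: the real routine spawns bpRequesters
}

func main() {
	bp := NewBlockPool()
	if err := bp.OnStart(); err != nil {
		panic(err)
	}
	bp.OnStop()
	fmt.Println("pool stopped")
}
```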
@@ -8,7 +8,7 @@ import (
 	"time"

 	. "github.com/tendermint/go-common"
-	"github.com/tendermint/go-events"
+	cfg "github.com/tendermint/go-config"
 	"github.com/tendermint/go-p2p"
 	"github.com/tendermint/go-wire"
 	"github.com/tendermint/tendermint/proxy"

@@ -42,7 +42,7 @@ type consensusReactor interface {
 type BlockchainReactor struct {
 	p2p.BaseReactor

-	sw           *p2p.Switch
+	config       cfg.Config
 	state        *sm.State
 	proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn
 	store        *BlockStore

@@ -52,10 +52,10 @@ type BlockchainReactor struct {
 	timeoutsCh chan string
 	lastBlock  *types.Block

-	evsw *events.EventSwitch
+	evsw types.EventSwitch
 }

-func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
+func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
 	if state.LastBlockHeight == store.Height()-1 {
 		store.height -= 1 // XXX HACK, make this better
 	}

@@ -70,6 +70,7 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus,
 		timeoutsCh,
 	)
 	bcR := &BlockchainReactor{
+		config:       config,
 		state:        state,
 		proxyAppConn: proxyAppConn,
 		store:        store,

@@ -130,7 +131,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
 		return
 	}

-	log.Notice("Receive", "src", src, "chID", chID, "msg", msg)
+	log.Debug("Receive", "src", src, "chID", chID, "msg", msg)

 	switch msg := msg.(type) {
 	case *bcBlockRequestMessage:

@@ -220,33 +221,32 @@ FOR_LOOP:
 				// We need both to sync the first block.
 				break SYNC_LOOP
 			}
-			firstParts := first.MakePartSet()
+			firstParts := first.MakePartSet(bcR.config.GetInt("block_part_size")) // TODO: put part size in parts header?
 			firstPartsHeader := firstParts.Header()
 			// Finally, verify the first block using the second's commit
 			// NOTE: we can probably make this more efficient, but note that calling
 			// first.Hash() doesn't verify the tx contents, so MakePartSet() is
 			// currently necessary.
 			err := bcR.state.Validators.VerifyCommit(
-				bcR.state.ChainID, first.Hash(), firstPartsHeader, first.Height, second.LastCommit)
+				bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit)
 			if err != nil {
 				log.Info("error in validation", "error", err)
 				bcR.pool.RedoRequest(first.Height)
 				break SYNC_LOOP
 			} else {
 				bcR.pool.PopRequest()
-				// TODO: use ApplyBlock instead of Exec/Commit/SetAppHash/Save
-				err := bcR.state.ExecBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader)
-				if err != nil {
-					// TODO This is bad, are we zombie?
-					PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
-				}
-				// NOTE: we could improve performance if we
-				// didn't make the app commit to disk every block
-				// ... but we would need a way to get the hash without it persisting
-				res := bcR.proxyAppConn.CommitSync()
-				if res.IsErr() {
-					// TODO Handle gracefully.
-					PanicQ(Fmt("Failed to commit block at application: %v", res))
-				}
-				bcR.store.SaveBlock(first, firstParts, second.LastCommit)
-				bcR.state.AppHash = res.Data
-				bcR.state.Save()
+				// TODO: should we be firing events? need to fire NewBlock events manually ...
+				// NOTE: we could improve performance if we
+				// didn't make the app commit to disk every block
+				// ... but we would need a way to get the hash without it persisting
+				err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, sm.MockMempool{})
+				if err != nil {
+					// TODO This is bad, are we zombie?
+					PanicQ(Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
+				}
+				bcR.store.SaveBlock(first, firstParts, second.LastCommit)
 			}
 		}

@@ -268,7 +268,7 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
 }

 // implements events.Eventable
-func (bcR *BlockchainReactor) SetEventSwitch(evsw *events.EventSwitch) {
+func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) {
 	bcR.evsw = evsw
 }
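One recurring change in this hunk is that VerifyCommit now receives the block hash and the part-set header bundled into a single `types.BlockID`. A rough sketch of why that grouping helps — one value identifies both the block and how it was split into parts — follows; the types are simplified stand-ins, not the real tendermint/types API.

```go
package main

import "fmt"

// Simplified stand-ins for the types in the diff.
type PartSetHeader struct {
	Total int
	Hash  []byte
}

// BlockID bundles the block hash with the header of the PartSet the
// block was broadcast in, so both travel together through validation.
type BlockID struct {
	Hash        []byte
	PartsHeader PartSetHeader
}

// VerifyCommit takes one BlockID instead of separate hash/header
// arguments, mirroring the call-site change in the diff.
func VerifyCommit(chainID string, blockID BlockID, height int) error {
	if len(blockID.Hash) == 0 {
		return fmt.Errorf("empty block hash at height %d", height)
	}
	// ... a real implementation checks +2/3 precommits for blockID ...
	return nil
}

func main() {
	id := BlockID{Hash: []byte{0x01}, PartsHeader: PartSetHeader{Total: 1}}
	fmt.Println(VerifyCommit("test-chain", id, 1)) // <nil>
}
```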
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"sync"

 	. "github.com/tendermint/go-common"
 	dbm "github.com/tendermint/go-db"

@@ -27,8 +28,10 @@ the Commit data outside the Block.
 Panics indicate probable corruption in the data
 */
 type BlockStore struct {
+	db dbm.DB
+
+	mtx    sync.RWMutex
 	height int
-	db     dbm.DB
 }

 func NewBlockStore(db dbm.DB) *BlockStore {

@@ -41,6 +44,8 @@ func NewBlockStore(db dbm.DB) *BlockStore {

 // Height() returns the last known contiguous block height.
 func (bs *BlockStore) Height() int {
+	bs.mtx.RLock()
+	defer bs.mtx.RUnlock()
 	return bs.height
 }

@@ -141,8 +146,8 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
 // most recent height. Otherwise they'd stall at H-1.
 func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
 	height := block.Height
-	if height != bs.height+1 {
-		PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
+	if height != bs.Height()+1 {
+		PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
 	}
 	if !blockParts.IsComplete() {
 		PanicSanity(Fmt("BlockStore can only save complete block part sets"))

@@ -163,6 +168,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
 	bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)

 	// Save seen commit (seen +2/3 precommits for block)
+	// NOTE: we can delete this at a later height
 	seenCommitBytes := wire.BinaryBytes(seenCommit)
 	bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)

@@ -170,12 +176,17 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
 	BlockStoreStateJSON{Height: height}.Save(bs.db)

 	// Done!
+	bs.mtx.Lock()
 	bs.height = height
+	bs.mtx.Unlock()
+
+	// Flush
+	bs.db.SetSync(nil, nil)
 }

 func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
-	if height != bs.height+1 {
-		PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
+	if height != bs.Height()+1 {
+		PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
 	}
 	partBytes := wire.BinaryBytes(part)
 	bs.db.Set(calcBlockPartKey(height, index), partBytes)

@@ -212,7 +223,7 @@ func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
 	if err != nil {
 		PanicSanity(Fmt("Could not marshal state bytes: %v", err))
 	}
-	db.Set(blockStoreKey, bytes)
+	db.SetSync(blockStoreKey, bytes)
 }

 func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
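These BlockStore changes make every read of the height go through an RLock'd accessor, while SaveBlock updates it under the write lock. Distilled to its essentials, the pattern looks like this sketch with simplified fields (not the full BlockStore):

```go
package main

import (
	"fmt"
	"sync"
)

// Sketch of the thread-safe height accessor the diff introduces:
// reads take an RLock, writes take the full lock.
type BlockStore struct {
	mtx    sync.RWMutex
	height int
}

func (bs *BlockStore) Height() int {
	bs.mtx.RLock()
	defer bs.mtx.RUnlock()
	return bs.height
}

func (bs *BlockStore) SaveBlock(h int) error {
	// Validate against the locked accessor, as the diff does,
	// instead of reading the field directly.
	if h != bs.Height()+1 {
		return fmt.Errorf("non-contiguous block: want %d, got %d", bs.Height()+1, h)
	}
	// ... persist block parts and commits here ...
	bs.mtx.Lock()
	bs.height = h
	bs.mtx.Unlock()
	return nil
}

func main() {
	bs := &BlockStore{}
	if err := bs.SaveBlock(1); err != nil {
		panic(err)
	}
	fmt.Println(bs.Height()) // 1
}
```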
@@ -29,6 +29,10 @@ dependencies:

 test:
   override:
-    - "cd $REPO && make test_integrations"
+    - "cd $REPO && set -o pipefail && make test_integrations | tee ~/test_integrations.log":
+        timeout: 1800
+    - "cp ~/test_integrations.log $CIRCLE_ARTIFACTS"
   post:
-    - bash <(curl -s https://codecov.io/bash)
+    - "cd $REPO && bash <(curl -s https://codecov.io/bash)"
@@ -16,24 +16,34 @@ func parseFlags(config cfg.Config, args []string) {
 		fastSync      bool
 		skipUPNP      bool
 		rpcLaddr      string
+		grpcLaddr     string
 		logLevel      string
 		proxyApp      string
-		tmspTransport string
+		abciTransport string
+
+		pex bool
 	)

 	// Declare flags
 	var flags = flag.NewFlagSet("main", flag.ExitOnError)
 	flags.BoolVar(&printHelp, "help", false, "Print this help message.")
+
+	// configuration options
 	flags.StringVar(&moniker, "moniker", config.GetString("moniker"), "Node Name")
 	flags.StringVar(&nodeLaddr, "node_laddr", config.GetString("node_laddr"), "Node listen address. (0.0.0.0:0 means any interface, any port)")
 	flags.StringVar(&seeds, "seeds", config.GetString("seeds"), "Comma delimited host:port seed nodes")
 	flags.BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"), "Fast blockchain syncing")
 	flags.BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"), "Skip UPNP configuration")
 	flags.StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"), "RPC listen address. Port required")
+	flags.StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc_laddr"), "GRPC listen address (BroadcastTx only). Port required")
 	flags.StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
 	flags.StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
 		"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
-	flags.StringVar(&tmspTransport, "tmsp", config.GetString("tmsp"), "Specify tmsp transport (socket | grpc)")
+	flags.StringVar(&abciTransport, "abci", config.GetString("abci"), "Specify abci transport (socket | grpc)")
+
+	// feature flags
+	flags.BoolVar(&pex, "pex", config.GetBool("pex_reactor"), "Enable Peer-Exchange (dev feature)")

 	flags.Parse(args)
 	if printHelp {
 		flags.PrintDefaults()

@@ -47,7 +57,10 @@ func parseFlags(config cfg.Config, args []string) {
 	config.Set("fast_sync", fastSync)
 	config.Set("skip_upnp", skipUPNP)
 	config.Set("rpc_laddr", rpcLaddr)
+	config.Set("grpc_laddr", grpcLaddr)
 	config.Set("log_level", logLevel)
 	config.Set("proxy_app", proxyApp)
-	config.Set("tmsp", tmspTransport)
+	config.Set("abci", abciTransport)
+
+	config.Set("pex_reactor", pex)
 }
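parseFlags above binds every flag's default to the current config value and then writes the parsed results back into the config. A small runnable sketch of that round-trip, using a plain map in place of go-config (an assumption-level stand-in, not the real library):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Stand-in config; go-config's MapConfig plays this role in the diff.
	config := map[string]string{
		"log_level": "info",
		"proxy_app": "tcp://127.0.0.1:46658",
	}

	// Flag defaults come from the config, as in parseFlags.
	flags := flag.NewFlagSet("main", flag.ExitOnError)
	logLevel := flags.String("log_level", config["log_level"], "Log level")
	proxyApp := flags.String("proxy_app", config["proxy_app"],
		"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
	flags.Parse(os.Args[1:])

	// Write parsed values back into the config, as the diff does.
	config["log_level"] = *logLevel
	config["proxy_app"] = *proxyApp
	fmt.Println(config)
}
```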
@@ -20,4 +20,5 @@ func init_files() {

 	genDoc.SaveAs(config.GetString("genesis_file"))

+	log.Notice("Initialized tendermint", "genesis", config.GetString("genesis_file"), "priv_validator", config.GetString("priv_validator_file"))
 }
@@ -41,10 +41,10 @@ Commands:
 	case "node":
 		node.RunNode(config)
 	case "replay":
-		if len(args) > 1 && args[1] == "console" {
-			node.RunReplayConsole(config)
+		if len(args) > 2 && args[1] == "console" {
+			node.RunReplayConsole(config, args[2])
 		} else {
-			node.RunReplay(config)
+			node.RunReplay(config, args[1])
 		}
 	case "init":
 		init_files()
@@ -11,7 +11,7 @@ import (
 func reset_all() {
 	reset_priv_validator()
 	os.RemoveAll(config.GetString("db_dir"))
-	os.Remove(config.GetString("cswal"))
+	os.RemoveAll(config.GetString("cs_wal_dir"))
 }

 // NOTE: this is totally unsafe.
@@ -22,6 +22,7 @@ func getTMRoot(rootDir string) string {
 func initTMRoot(rootDir string) {
 	rootDir = getTMRoot(rootDir)
 	EnsureDir(rootDir, 0700)
+	EnsureDir(rootDir+"/data", 0700)

 	configFilePath := path.Join(rootDir, "config.toml")

@@ -53,7 +54,7 @@ func GetConfig(rootDir string) cfg.Config {
 	mapConfig.SetRequired("chain_id") // blows up if you try to use it before setting.
 	mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
 	mapConfig.SetDefault("proxy_app", "tcp://127.0.0.1:46658")
-	mapConfig.SetDefault("tmsp", "socket")
+	mapConfig.SetDefault("abci", "socket")
 	mapConfig.SetDefault("moniker", "anonymous")
 	mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:46656")
 	mapConfig.SetDefault("seeds", "")

@@ -61,18 +62,22 @@ func GetConfig(rootDir string) cfg.Config {
 	mapConfig.SetDefault("fast_sync", true)
 	mapConfig.SetDefault("skip_upnp", false)
 	mapConfig.SetDefault("addrbook_file", rootDir+"/addrbook.json")
+	mapConfig.SetDefault("addrbook_strict", true) // disable to allow connections locally
+	mapConfig.SetDefault("pex_reactor", false)    // enable for peer exchange
 	mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
 	mapConfig.SetDefault("db_backend", "leveldb")
 	mapConfig.SetDefault("db_dir", rootDir+"/data")
 	mapConfig.SetDefault("log_level", "info")
 	mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:46657")
+	mapConfig.SetDefault("grpc_laddr", "")
 	mapConfig.SetDefault("prof_laddr", "")
 	mapConfig.SetDefault("revision_file", rootDir+"/revision")
-	mapConfig.SetDefault("cswal", rootDir+"/data/cswal")
-	mapConfig.SetDefault("cswal_light", false)
+	mapConfig.SetDefault("cs_wal_dir", rootDir+"/data/cs.wal")
+	mapConfig.SetDefault("cs_wal_light", false)
 	mapConfig.SetDefault("filter_peers", false)

-	mapConfig.SetDefault("block_size", 10000)
+	mapConfig.SetDefault("block_size", 10000)      // max number of txs
+	mapConfig.SetDefault("block_part_size", 65536) // part size 64K
+	mapConfig.SetDefault("disable_data_hash", false)
 	mapConfig.SetDefault("timeout_propose", 3000)
 	mapConfig.SetDefault("timeout_propose_delta", 500)

@@ -81,9 +86,12 @@ func GetConfig(rootDir string) cfg.Config {
 	mapConfig.SetDefault("timeout_precommit", 1000)
 	mapConfig.SetDefault("timeout_precommit_delta", 500)
 	mapConfig.SetDefault("timeout_commit", 1000)
+	// make progress asap (no `timeout_commit`) on full precommit votes
+	mapConfig.SetDefault("skip_timeout_commit", false)
 	mapConfig.SetDefault("mempool_recheck", true)
 	mapConfig.SetDefault("mempool_recheck_empty", true)
 	mapConfig.SetDefault("mempool_broadcast", true)
+	mapConfig.SetDefault("mempool_wal_dir", rootDir+"/data/mempool.wal")

 	return mapConfig
 }
@@ -9,6 +9,7 @@ import (

 	. "github.com/tendermint/go-common"
 	cfg "github.com/tendermint/go-config"
+	"github.com/tendermint/go-logger"
 )

 func init() {

@@ -33,6 +34,7 @@ func initTMRoot(rootDir string) {
 	}
 	// Create new dir
 	EnsureDir(rootDir, 0700)
+	EnsureDir(rootDir+"/data", 0700)

 	configFilePath := path.Join(rootDir, "config.toml")
 	genesisFilePath := path.Join(rootDir, "genesis.json")

@@ -68,35 +70,43 @@ func ResetConfig(localPath string) cfg.Config {
 	mapConfig.SetDefault("chain_id", "tendermint_test")
 	mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
 	mapConfig.SetDefault("proxy_app", "dummy")
-	mapConfig.SetDefault("tmsp", "socket")
+	mapConfig.SetDefault("abci", "socket")
 	mapConfig.SetDefault("moniker", "anonymous")
 	mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:36656")
 	mapConfig.SetDefault("fast_sync", false)
 	mapConfig.SetDefault("skip_upnp", true)
 	mapConfig.SetDefault("addrbook_file", rootDir+"/addrbook.json")
+	mapConfig.SetDefault("addrbook_strict", true) // disable to allow connections locally
+	mapConfig.SetDefault("pex_reactor", false)    // enable for peer exchange
 	mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
 	mapConfig.SetDefault("db_backend", "memdb")
 	mapConfig.SetDefault("db_dir", rootDir+"/data")
-	mapConfig.SetDefault("log_level", "debug")
+	mapConfig.SetDefault("log_level", "info")
 	mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:36657")
+	mapConfig.SetDefault("grpc_laddr", "tcp://0.0.0.0:36658")
 	mapConfig.SetDefault("prof_laddr", "")
 	mapConfig.SetDefault("revision_file", rootDir+"/revision")
-	mapConfig.SetDefault("cswal", rootDir+"/data/cswal")
-	mapConfig.SetDefault("cswal_light", false)
+	mapConfig.SetDefault("cs_wal_dir", rootDir+"/data/cs.wal")
+	mapConfig.SetDefault("cs_wal_light", false)
 	mapConfig.SetDefault("filter_peers", false)

 	mapConfig.SetDefault("block_size", 10000)
+	mapConfig.SetDefault("block_part_size", 65536) // part size 64K
+	mapConfig.SetDefault("disable_data_hash", false)
 	mapConfig.SetDefault("timeout_propose", 2000)
-	mapConfig.SetDefault("timeout_propose_delta", 500)
-	mapConfig.SetDefault("timeout_prevote", 1000)
-	mapConfig.SetDefault("timeout_prevote_delta", 500)
-	mapConfig.SetDefault("timeout_precommit", 1000)
-	mapConfig.SetDefault("timeout_precommit_delta", 500)
-	mapConfig.SetDefault("timeout_commit", 100)
+	mapConfig.SetDefault("timeout_propose_delta", 1)
+	mapConfig.SetDefault("timeout_prevote", 10)
+	mapConfig.SetDefault("timeout_prevote_delta", 1)
+	mapConfig.SetDefault("timeout_precommit", 10)
+	mapConfig.SetDefault("timeout_precommit_delta", 1)
+	mapConfig.SetDefault("timeout_commit", 10)
+	mapConfig.SetDefault("skip_timeout_commit", true)
 	mapConfig.SetDefault("mempool_recheck", true)
 	mapConfig.SetDefault("mempool_recheck_empty", true)
 	mapConfig.SetDefault("mempool_broadcast", true)
+	mapConfig.SetDefault("mempool_wal_dir", "")
+
+	logger.SetLogLevel(mapConfig.GetString("log_level"))

 	return mapConfig
 }

@@ -110,7 +120,7 @@ node_laddr = "tcp://0.0.0.0:36656"
 seeds = ""
 fast_sync = false
 db_backend = "memdb"
-log_level = "debug"
+log_level = "info"
 rpc_laddr = "tcp://0.0.0.0:36657"
 `
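Both config files lean on the same SetDefault semantics: a default only takes effect if nothing has set the key yet, which is what lets the test config layer its aggressive timeouts over the production defaults. A hypothetical, minimal version of that behavior (a stand-in for go-config's MapConfig, not its real API):

```go
package main

import "fmt"

// MapConfig is a toy map-backed config with SetDefault/Set/GetInt.
type MapConfig map[string]interface{}

// SetDefault fills a key only when no explicit value exists yet.
func (c MapConfig) SetDefault(key string, val interface{}) {
	if _, ok := c[key]; !ok {
		c[key] = val
	}
}

func (c MapConfig) Set(key string, val interface{}) { c[key] = val }

func (c MapConfig) GetInt(key string) int {
	if v, ok := c[key].(int); ok {
		return v
	}
	return 0
}

func main() {
	cfg := MapConfig{}
	cfg.Set("block_part_size", 32768)        // explicit value wins
	cfg.SetDefault("block_part_size", 65536) // ignored: already set
	cfg.SetDefault("timeout_commit", 10)     // applied
	fmt.Println(cfg.GetInt("block_part_size"), cfg.GetInt("timeout_commit")) // 32768 10
}
```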
consensus/byzantine_test.go (new file, 297 lines)

@@ -0,0 +1,297 @@
package consensus

import (
	"sync"
	"testing"
	"time"

	"github.com/tendermint/tendermint/config/tendermint_test"

	. "github.com/tendermint/go-common"
	cfg "github.com/tendermint/go-config"
	"github.com/tendermint/go-events"
	"github.com/tendermint/go-p2p"
	"github.com/tendermint/tendermint/types"
)

func init() {
	config = tendermint_test.ResetConfig("consensus_byzantine_test")
}

//----------------------------------------------
// byzantine failures

// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
// The byzantine validator sends conflicting proposals into A and B,
// and prevotes/precommits on both of them.
// B sees a commit, A doesn't.
// The byzantine validator refuses to prevote.
// Heal the partition and ensure A sees the commit.
func TestByzantine(t *testing.T) {
	N := 4
	css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)

	// give the byzantine validator a normal ticker
	css[0].SetTimeoutTicker(NewTimeoutTicker())

	switches := make([]*p2p.Switch, N)
	for i := 0; i < N; i++ {
		switches[i] = p2p.NewSwitch(cfg.NewMapConfig(nil))
	}

	reactors := make([]p2p.Reactor, N)
	defer func() {
		for _, r := range reactors {
			if rr, ok := r.(*ByzantineReactor); ok {
				rr.reactor.Switch.Stop()
			} else {
				r.(*ConsensusReactor).Switch.Stop()
			}
		}
	}()
	eventChans := make([]chan interface{}, N)
	for i := 0; i < N; i++ {
		if i == 0 {
			css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator))
			// make byzantine
			css[i].decideProposal = func(j int) func(int, int) {
				return func(height, round int) {
					byzantineDecideProposalFunc(height, round, css[j], switches[j])
				}
			}(i)
			css[i].doPrevote = func(height, round int) {}
		}

		eventSwitch := events.NewEventSwitch()
		_, err := eventSwitch.Start()
		if err != nil {
			t.Fatalf("Failed to start switch: %v", err)
		}
		eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)

		conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
		conR.SetEventSwitch(eventSwitch)

		var conRI p2p.Reactor
		conRI = conR
		if i == 0 {
			conRI = NewByzantineReactor(conR)
		}
		reactors[i] = conRI
	}

	p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
		// ignore new switch s, we already made ours
		switches[i].AddReactor("CONSENSUS", reactors[i])
		return switches[i]
	}, func(sws []*p2p.Switch, i, j int) {
		// the network starts partitioned with globally active adversary
		if i != 0 {
			return
		}
		p2p.Connect2Switches(sws, i, j)
	})

	// start the state machines
	byzR := reactors[0].(*ByzantineReactor)
	s := byzR.reactor.conS.GetState()
	byzR.reactor.SwitchToConsensus(s)
	for i := 1; i < N; i++ {
		cr := reactors[i].(*ConsensusReactor)
		cr.SwitchToConsensus(cr.conS.GetState())
	}

	// byz proposer sends one block to peers[0]
	// and the other block to peers[1] and peers[2].
	// note peers and switches order don't match.
	peers := switches[0].Peers().List()
	ind0 := getSwitchIndex(switches, peers[0])
	ind1 := getSwitchIndex(switches, peers[1])
	ind2 := getSwitchIndex(switches, peers[2])

	// connect the 2 peers in the larger partition
	p2p.Connect2Switches(switches, ind1, ind2)

	// wait for someone in the big partition to make a block

	select {
	case <-eventChans[ind2]:
	}

	log.Notice("A block has been committed. Healing partition")

	// connect the partitions
	p2p.Connect2Switches(switches, ind0, ind1)
	p2p.Connect2Switches(switches, ind0, ind2)

	// wait till everyone makes the first new block
	// (one of them already has)
	wg := new(sync.WaitGroup)
	wg.Add(2)
	for i := 1; i < N-1; i++ {
		go func(j int) {
			<-eventChans[j]
			wg.Done()
		}(i)
	}

	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	tick := time.NewTicker(time.Second * 10)
	select {
	case <-done:
	case <-tick.C:
		for i, reactor := range reactors {
			t.Log(Fmt("Consensus Reactor %v", i))
			t.Log(Fmt("%v", reactor))
		}
		t.Fatalf("Timed out waiting for all validators to commit first block")
	}
}

//-------------------------------
// byzantine consensus functions

func byzantineDecideProposalFunc(height, round int, cs *ConsensusState, sw *p2p.Switch) {
	// byzantine user should create two proposals and try to split the vote.
	// Avoid sending on internalMsgQueue and running consensus state.

	// Create a new proposal block from state/txs from the mempool.
	block1, blockParts1 := cs.createProposalBlock()
	polRound, polBlockID := cs.Votes.POLInfo()
	proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID)
	cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesn't err

	// Create a new proposal block from state/txs from the mempool.
	block2, blockParts2 := cs.createProposalBlock()
	polRound, polBlockID = cs.Votes.POLInfo()
	proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID)
	cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesn't err

	block1Hash := block1.Hash()
	block2Hash := block2.Hash()

	// broadcast conflicting proposals/block parts to peers
	peers := sw.Peers().List()
	log.Notice("Byzantine: broadcasting conflicting proposals", "peers", len(peers))
	for i, peer := range peers {
		if i < len(peers)/2 {
			go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
		} else {
			go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
		}
	}
}

func sendProposalAndParts(height, round int, cs *ConsensusState, peer *p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
	// proposal
	msg := &ProposalMessage{Proposal: proposal}
	peer.Send(DataChannel, struct{ ConsensusMessage }{msg})

	// parts
	for i := 0; i < parts.Total(); i++ {
		part := parts.GetPart(i)
		msg := &BlockPartMessage{
			Height: height, // This tells peer that this part applies to us.
			Round:  round,  // This tells peer that this part applies to us.
			Part:   part,
		}
		peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
	}

	// votes
	cs.mtx.Lock()
	prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header())
	precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
	cs.mtx.Unlock()

	peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}})
	peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}})
}

//----------------------------------------
// byzantine consensus reactor

type ByzantineReactor struct {
	Service
	reactor *ConsensusReactor
}

func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor {
	return &ByzantineReactor{
		Service: conR,
		reactor: conR,
	}
}

func (br *ByzantineReactor) SetSwitch(s *p2p.Switch)               { br.reactor.SetSwitch(s) }
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
	if !br.reactor.IsRunning() {
		return
	}

	// Create peerState for peer
	peerState := NewPeerState(peer)
	peer.Data.Set(types.PeerStateKey, peerState)

	// Send our state to peer.
	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
	if !br.reactor.fastSync {
		br.reactor.sendNewRoundStepMessage(peer)
	}
}
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
	br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) {
	br.reactor.Receive(chID, peer, msgBytes)
}

//----------------------------------------
// byzantine privValidator

type ByzantinePrivValidator struct {
	Address      []byte `json:"address"`
	types.Signer `json:"-"`

	mtx sync.Mutex
}

// Return a priv validator that will sign anything
func NewByzantinePrivValidator(pv *types.PrivValidator) *ByzantinePrivValidator {
	return &ByzantinePrivValidator{
		Address: pv.Address,
		Signer:  pv.Signer,
	}
}

func (privVal *ByzantinePrivValidator) GetAddress() []byte {
	return privVal.Address
}

func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) error {
	privVal.mtx.Lock()
	defer privVal.mtx.Unlock()

	// Sign
	vote.Signature = privVal.Sign(types.SignBytes(chainID, vote))
	return nil
}

func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) error {
	privVal.mtx.Lock()
	defer privVal.mtx.Unlock()

	// Sign
	proposal.Signature = privVal.Sign(types.SignBytes(chainID, proposal))
	return nil
}

func (privVal *ByzantinePrivValidator) String() string {
	return Fmt("PrivValidator{%X}", privVal.Address)
}
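TestByzantine's final stanza uses a common Go testing idiom worth isolating: wait on a WaitGroup through a closed channel so it can race a timeout inside a select. A stand-alone sketch of just that idiom (using time.After where the test uses a Ticker):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	wg := new(sync.WaitGroup)
	wg.Add(2)
	for i := 0; i < 2; i++ {
		go func(j int) {
			// stand-in for "wait for validator j's NewBlock event"
			time.Sleep(time.Duration(j+1) * 10 * time.Millisecond)
			wg.Done()
		}(i)
	}

	// Bridge the WaitGroup into a channel so select can race it.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("all validators committed")
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for commits")
	}
}
```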
@@ -1,15 +1,29 @@
 package consensus

 import (
 	"github.com/tendermint/go-events"
 	"github.com/tendermint/tendermint/types"
 )

-// NOTE: this is blocking
-func subscribeToEvent(evsw *events.EventSwitch, receiver, eventID string, chanCap int) chan interface{} {
-	// listen for new round
+// XXX: WARNING: these functions can halt the consensus as firing events is synchronous.
+// Make sure to read off the channels, and in the case of subscribeToEventRespond, to write back on it
+
+// NOTE: if chanCap=0, this blocks on the event being consumed
+func subscribeToEvent(evsw types.EventSwitch, receiver, eventID string, chanCap int) chan interface{} {
+	// listen for event
 	ch := make(chan interface{}, chanCap)
-	evsw.AddListenerForEvent(receiver, eventID, func(data events.EventData) {
+	types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
 		ch <- data
 	})
 	return ch
 }

+// NOTE: this blocks on receiving a response after the event is consumed
+func subscribeToEventRespond(evsw types.EventSwitch, receiver, eventID string) chan interface{} {
+	// listen for event
+	ch := make(chan interface{})
+	types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
+		ch <- data
+		<-ch
+	})
+	return ch
+}
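subscribeToEventRespond's listener blocks twice: once sending the event and once waiting for the consumer to write back. The warning comment above exists because a consumer that forgets the write-back stalls the event switch. A toy demonstration of that handshake, with plain channels standing in for the event switch:

```go
package main

import "fmt"

func main() {
	ch := make(chan interface{})
	done := make(chan struct{})

	// Consumer: read the event, then write back to release the firer.
	go func() {
		ev := <-ch
		fmt.Println("got:", ev)
		ch <- struct{}{} // the mandatory write-back
		close(done)
	}()

	// Firer: blocks on both the send and the response, mirroring
	// the listener installed by subscribeToEventRespond.
	ch <- "NewBlock"
	<-ch
	<-done
	fmt.Println("event fully consumed")
}
```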
@@ -3,52 +3,72 @@ package consensus
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/tendermint/go-common"
|
||||
cfg "github.com/tendermint/go-config"
|
||||
dbm "github.com/tendermint/go-db"
|
||||
"github.com/tendermint/go-events"
|
||||
"github.com/tendermint/go-p2p"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/config/tendermint_test"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmspcli "github.com/tendermint/tmsp/client"
|
||||
tmsp "github.com/tendermint/tmsp/types"
|
||||
abcicli "github.com/tendermint/abci/client"
|
||||
abci "github.com/tendermint/abci/types"
|
||||
|
||||
"github.com/tendermint/tmsp/example/counter"
|
||||
"github.com/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
)
|
||||
|
||||
var config cfg.Config // NOTE: must be reset for each _test.go file
|
||||
var ensureTimeout = time.Duration(2)
|
||||
|
||||
func ensureDir(dir string, mode os.FileMode) {
|
||||
if err := EnsureDir(dir, mode); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// validator stub (a dummy consensus peer we control)
|
||||
|
||||
type validatorStub struct {
|
||||
Index int // Validator index. NOTE: we don't assume validator set changes.
|
||||
Height int
|
||||
Round int
|
||||
*types.PrivValidator
|
||||
}
|
||||
|
||||
func NewValidatorStub(privValidator *types.PrivValidator) *validatorStub {
|
||||
var testMinPower = 10
|
||||
|
||||
func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validatorStub {
|
||||
return &validatorStub{
|
||||
Index: valIndex,
|
||||
PrivValidator: privValidator,
|
||||
}
|
||||
}
|
||||
|
||||
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
|
||||
vote := &types.Vote{
|
||||
ValidatorIndex: vs.Index,
|
||||
ValidatorAddress: vs.PrivValidator.Address,
|
||||
Height: vs.Height,
|
||||
Round: vs.Round,
|
||||
Type: voteType,
|
||||
BlockHash: hash,
|
||||
BlockPartsHeader: header,
|
||||
BlockID: types.BlockID{hash, header},
|
||||
}
|
||||
err := vs.PrivValidator.SignVote(config.GetString("chain_id"), vote)
|
||||
return vote, err
|
||||
}
|
||||
|
||||
// convenienve function for testing
|
||||
// Sign vote for type/hash/header
|
||||
func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote {
|
||||
v, err := vs.signVote(voteType, hash, header)
|
||||
if err != nil {
|
||||
@@ -57,102 +77,7 @@ func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSe
|
||||
return v
|
||||
}
|
||||
|
||||
// create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, cs2 *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
    block, blockParts := cs1.createProposalBlock()
    if block == nil { // on error
        panic("error creating proposal block")
    }

    // Make proposal
    proposal = types.NewProposal(height, round, blockParts.Header(), cs1.Votes.POLRound())
    if err := cs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
        panic(err)
    }
    return
}

//-------------------------------------------------------------------------------
// utils

/*
func nilRound(t *testing.T, cs1 *ConsensusState, vss ...*validatorStub) {
    cs1.mtx.Lock()
    height, round := cs1.Height, cs1.Round
    cs1.mtx.Unlock()

    waitFor(t, cs1, height, round, RoundStepPrevote)

    signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, cs1.ProposalBlockParts.Header(), vss...)

    waitFor(t, cs1, height, round, RoundStepPrecommit)

    signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, cs1.ProposalBlockParts.Header(), vss...)

    waitFor(t, cs1, height, round+1, RoundStepNewRound)
}
*/

// NOTE: this switches the proposer as far as `perspectiveOf` is concerned,
// but for simplicity we return a block it generated.
func changeProposer(t *testing.T, perspectiveOf *ConsensusState, newProposer *validatorStub) *types.Block {
    _, v1 := perspectiveOf.Validators.GetByAddress(perspectiveOf.privValidator.Address)
    v1.Accum, v1.VotingPower = 0, 0
    if updated := perspectiveOf.Validators.Update(v1); !updated {
        panic("failed to update validator")
    }
    _, v2 := perspectiveOf.Validators.GetByAddress(newProposer.Address)
    v2.Accum, v2.VotingPower = 100, 100
    if updated := perspectiveOf.Validators.Update(v2); !updated {
        panic("failed to update validator")
    }

    // make the proposal
    propBlock, _ := perspectiveOf.createProposalBlock()
    if propBlock == nil {
        panic("Failed to create proposal block with cs2")
    }
    return propBlock
}

func fixVotingPower(t *testing.T, cs1 *ConsensusState, addr2 []byte) {
    _, v1 := cs1.Validators.GetByAddress(cs1.privValidator.Address)
    _, v2 := cs1.Validators.GetByAddress(addr2)
    v1.Accum, v1.VotingPower = v2.Accum, v2.VotingPower
    if updated := cs1.Validators.Update(v1); !updated {
        panic("failed to update validator")
    }
}

func addVoteToFromMany(to *ConsensusState, votes []*types.Vote, froms ...*validatorStub) {
    if len(votes) != len(froms) {
        panic("len(votes) and len(froms) must match")
    }

    for i, from := range froms {
        addVoteToFrom(to, from, votes[i])
    }
}

func addVoteToFrom(to *ConsensusState, from *validatorStub, vote *types.Vote) {
    to.mtx.Lock() // NOTE: won't need this when the vote comes with the index!
    valIndex, _ := to.Validators.GetByAddress(from.PrivValidator.Address)
    to.mtx.Unlock()

    to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{valIndex, vote}}
    // added, err := to.TryAddVote(valIndex, vote, "")
    /*
        if _, ok := err.(*types.ErrVoteConflictingSignature); ok {
            // let it fly
        } else if !added {
            fmt.Println("to, from, vote:", to.Height, from.Height, vote.Height)
            panic(fmt.Sprintln("Failed to add vote. Err:", err))
        } else if err != nil {
            panic(fmt.Sprintln("Failed to add vote:", err))
        }*/
}

func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
    votes := make([]*types.Vote, len(vss))
    for i, vs := range vss {
        votes[i] = signVote(vs, voteType, hash, header)
@@ -160,79 +85,6 @@ func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ..
    return votes
}

// add vote to one cs from another
// if voteCh is not nil, read all votes
func signAddVoteToFromMany(voteType byte, to *ConsensusState, hash []byte, header types.PartSetHeader, voteCh chan interface{}, froms ...*validatorStub) {
    var wg chan struct{} // when done reading all votes
    if voteCh != nil {
        wg = readVotes(voteCh, len(froms))
    }
    for _, from := range froms {
        vote := signVote(from, voteType, hash, header)
        addVoteToFrom(to, from, vote)
    }

    if voteCh != nil {
        <-wg
    }
}

func signAddVoteToFrom(voteType byte, to *ConsensusState, from *validatorStub, hash []byte, header types.PartSetHeader, voteCh chan interface{}) *types.Vote {
    var wg chan struct{} // when done reading all votes
    if voteCh != nil {
        wg = readVotes(voteCh, 1)
    }
    vote := signVote(from, voteType, hash, header)
    addVoteToFrom(to, from, vote)
    if voteCh != nil {
        <-wg
    }
    return vote
}

func ensureNoNewStep(stepCh chan interface{}) {
    timeout := time.NewTicker(ensureTimeout * time.Second)
    select {
    case <-timeout.C:
        break
    case <-stepCh:
        panic("We should be stuck waiting for more votes, not moving to the next step")
    }
}

/*
func ensureNoNewStep(t *testing.T, cs *ConsensusState) {
    timeout := time.NewTicker(ensureTimeout * time.Second)
    select {
    case <-timeout.C:
        break
    case <-cs.NewStepCh():
        panic("We should be stuck waiting for more votes, not moving to the next step")
    }
}

func ensureNewStep(t *testing.T, cs *ConsensusState) *RoundState {
    timeout := time.NewTicker(ensureTimeout * time.Second)
    select {
    case <-timeout.C:
        panic("We should have gone to the next step, not be stuck waiting")
    case rs := <-cs.NewStepCh():
        return rs
    }
}

func waitFor(t *testing.T, cs *ConsensusState, height int, round int, step RoundStepType) {
    for {
        rs := ensureNewStep(t, cs)
        if CompareHRS(rs.Height, rs.Round, rs.Step, height, round, step) < 0 {
            continue
        } else {
            break
        }
    }
}
*/

func incrementHeight(vss ...*validatorStub) {
    for _, vs := range vss {
        vs.Height += 1
@@ -245,6 +97,41 @@ func incrementRound(vss ...*validatorStub) {
    }
}

//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state

func startTestRound(cs *ConsensusState, height, round int) {
    cs.enterNewRound(height, round)
    cs.startRoutines(0)
}

// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
    block, blockParts := cs1.createProposalBlock()
    if block == nil { // on error
        panic("error creating proposal block")
    }

    // Make proposal
    polRound, polBlockID := cs1.Votes.POLInfo()
    proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
    if err := vs.SignProposal(config.GetString("chain_id"), proposal); err != nil {
        panic(err)
    }
    return
}

func addVotes(to *ConsensusState, votes ...*types.Vote) {
    for _, vote := range votes {
        to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
    }
}

func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
    votes := signVotes(voteType, hash, header, vss...)
    addVotes(to, votes...)
}

func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
    prevotes := cs.Votes.Prevotes(round)
    var vote *types.Vote
@@ -252,12 +139,12 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
        panic("Failed to find prevote from validator")
    }
    if blockHash == nil {
        if vote.BlockHash != nil {
            panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockHash))
        if vote.BlockID.Hash != nil {
            panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash))
        }
    } else {
        if !bytes.Equal(vote.BlockHash, blockHash) {
            panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockHash))
        if !bytes.Equal(vote.BlockID.Hash, blockHash) {
            panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash))
        }
    }
}
@@ -268,8 +155,8 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
    if vote = votes.GetByAddress(privVal.Address); vote == nil {
        panic("Failed to find precommit from validator")
    }
    if !bytes.Equal(vote.BlockHash, blockHash) {
        panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockHash))
    if !bytes.Equal(vote.BlockID.Hash, blockHash) {
        panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash))
    }
}

@@ -281,11 +168,11 @@ func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound in
    }

    if votedBlockHash == nil {
        if vote.BlockHash != nil {
        if vote.BlockID.Hash != nil {
            panic("Expected precommit to be for nil")
        }
    } else {
        if !bytes.Equal(vote.BlockHash, votedBlockHash) {
        if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
            panic("Expected precommit to be for proposal block")
        }
    }
@@ -311,56 +198,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
    cs.mtx.Unlock()
}

func fixedConsensusState() *ConsensusState {
    stateDB := dbm.NewMemDB()
    state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
    privValidatorFile := config.GetString("priv_validator_file")
    privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
    privValidator.Reset()
    cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
    return cs
}

func newConsensusState(state *sm.State, pv *types.PrivValidator, app tmsp.Application) *ConsensusState {
    // Get BlockStore
    blockDB := dbm.NewMemDB()
    blockStore := bc.NewBlockStore(blockDB)

    // one for mempool, one for consensus
    mtx := new(sync.Mutex)
    proxyAppConnMem := tmspcli.NewLocalClient(mtx, app)
    proxyAppConnCon := tmspcli.NewLocalClient(mtx, app)

    // Make Mempool
    mempool := mempl.NewMempool(config, proxyAppConnMem)

    // Make ConsensusReactor
    cs := NewConsensusState(config, state, proxyAppConnCon, blockStore, mempool)
    cs.SetPrivValidator(pv)

    evsw := events.NewEventSwitch()
    cs.SetEventSwitch(evsw)
    evsw.Start()
    return cs
}

func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
    // Get State
    state, privVals := randGenesisState(nValidators, false, 10)

    vss := make([]*validatorStub, nValidators)

    cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))

    for i := 0; i < nValidators; i++ {
        vss[i] = NewValidatorStub(privVals[i])
    }
    // since cs1 starts at 1
    incrementHeight(vss[1:]...)

    return cs, vss
}

// genesis
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
    voteCh0 := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
    voteCh := make(chan interface{})
@@ -369,7 +207,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
            v := <-voteCh0
            vote := v.(types.EventDataVote)
            // we only fire for our own votes
            if bytes.Equal(addr, vote.Address) {
            if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
                voteCh <- v
            }
        }
@@ -388,14 +226,145 @@ func readVotes(ch chan interface{}, reads int) chan struct{} {
    return wg
}

func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
    db := dbm.NewMemDB()
    genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
    s0 := sm.MakeGenesisState(db, genDoc)
    s0.Save()
    return s0, privValidators
//-------------------------------------------------------------------------------
// consensus states

func newConsensusState(state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
    return newConsensusStateWithConfig(config, state, pv, app)
}

func newConsensusStateWithConfig(thisConfig cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
    // Get BlockStore
    blockDB := dbm.NewMemDB()
    blockStore := bc.NewBlockStore(blockDB)

    // one for mempool, one for consensus
    mtx := new(sync.Mutex)
    proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
    proxyAppConnCon := abcicli.NewLocalClient(mtx, app)

    // Make Mempool
    mempool := mempl.NewMempool(thisConfig, proxyAppConnMem)

    // Make ConsensusReactor
    cs := NewConsensusState(thisConfig, state, proxyAppConnCon, blockStore, mempool)
    cs.SetPrivValidator(pv)

    evsw := types.NewEventSwitch()
    cs.SetEventSwitch(evsw)
    evsw.Start()
    return cs
}

func loadPrivValidator(conf cfg.Config) *types.PrivValidator {
    privValidatorFile := conf.GetString("priv_validator_file")
    ensureDir(path.Dir(privValidatorFile), 0700)
    privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
    privValidator.Reset()
    return privValidator
}

func fixedConsensusState() *ConsensusState {
    stateDB := dbm.NewMemDB()
    state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
    privValidator := loadPrivValidator(config)
    cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
    return cs
}

func fixedConsensusStateDummy() *ConsensusState {
    stateDB := dbm.NewMemDB()
    state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
    privValidator := loadPrivValidator(config)
    cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
    return cs
}

func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
    // Get State
    state, privVals := randGenesisState(nValidators, false, 10)

    vss := make([]*validatorStub, nValidators)

    cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))

    for i := 0; i < nValidators; i++ {
        vss[i] = NewValidatorStub(privVals[i], i)
    }
    // since cs1 starts at 1
    incrementHeight(vss[1:]...)

    return cs, vss
}
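Aside: a minimal sketch, assuming the surrounding consensus test package, of how the helpers above compose in a test; the test name and the nil-vote choreography are illustrative, the signatures are the post-change ones from this file.

    func TestHelperSketch(t *testing.T) { // hypothetical test, illustration only
        cs, vss := randConsensusState(4) // cs belongs to vss[0]
        height, round := cs.Height, cs.Round
        startTestRound(cs, height, round) // enter the round and start routines

        // the other three stubs prevote and precommit nil
        signAddVotes(cs, types.VoteTypePrevote, nil, types.PartSetHeader{}, vss[1:]...)
        signAddVotes(cs, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vss[1:]...)
        incrementRound(vss[1:]...) // keep the stubs' rounds in sync
    }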
//-------------------------------------------------------------------------------

func ensureNoNewStep(stepCh chan interface{}) {
    timeout := time.NewTicker(ensureTimeout * time.Second)
    select {
    case <-timeout.C:
        break
    case <-stepCh:
        panic("We should be stuck waiting for more votes, not moving to the next step")
    }
}

//-------------------------------------------------------------------------------
// consensus nets

func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
    genDoc, privVals := randGenesisDoc(nValidators, false, 10)
    css := make([]*ConsensusState, nValidators)
    for i := 0; i < nValidators; i++ {
        db := dbm.NewMemDB() // each state needs its own db
        state := sm.MakeGenesisState(db, genDoc)
        state.Save()
        thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
        ensureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
        css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc())
        css[i].SetTimeoutTicker(tickerFunc())
    }
    return css
}

// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
    genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
    css := make([]*ConsensusState, nPeers)
    for i := 0; i < nPeers; i++ {
        db := dbm.NewMemDB() // each state needs its own db
        state := sm.MakeGenesisState(db, genDoc)
        state.Save()
        thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
        ensureDir(thisConfig.GetString("cs_wal_dir"), 0700) // dir for wal
        var privVal *types.PrivValidator
        if i < nValidators {
            privVal = privVals[i]
        } else {
            privVal = types.GenPrivValidator()
            _, tempFilePath := Tempfile("priv_validator_")
            privVal.SetFile(tempFilePath)
        }

        css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc())
        css[i].SetTimeoutTicker(tickerFunc())
    }
    return css
}

func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
    for i, s := range switches {
        if bytes.Equal(peer.NodeInfo.PubKey.Address(), s.NodeInfo().PubKey.Address()) {
            return i
        }
    }
    panic("didnt find peer in switches")
    return -1
}

//-------------------------------------------------------------------------------
// genesis

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidator) {
    validators := make([]types.GenesisValidator, numValidators)
    privValidators := make([]*types.PrivValidator, numValidators)
@@ -413,10 +382,69 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
        ChainID:    config.GetString("chain_id"),
        Validators: validators,
    }, privValidators

}

func startTestRound(cs *ConsensusState, height, round int) {
    cs.enterNewRound(height, round)
    cs.startRoutines(0)
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
    genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
    db := dbm.NewMemDB()
    s0 := sm.MakeGenesisState(db, genDoc)
    s0.Save()
    return s0, privValidators
}

//------------------------------------
// mock ticker

func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {
    return func() TimeoutTicker {
        return &mockTicker{
            c:        make(chan timeoutInfo, 10),
            onlyOnce: onlyOnce,
        }
    }
}

// mock ticker only fires on RoundStepNewHeight
// and only once if onlyOnce=true
type mockTicker struct {
    c chan timeoutInfo

    mtx      sync.Mutex
    onlyOnce bool
    fired    bool
}

func (m *mockTicker) Start() (bool, error) {
    return true, nil
}

func (m *mockTicker) Stop() bool {
    return true
}

func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
    m.mtx.Lock()
    defer m.mtx.Unlock()
    if m.onlyOnce && m.fired {
        return
    }
    if ti.Step == RoundStepNewHeight {
        m.c <- ti
        m.fired = true
    }
}

func (m *mockTicker) Chan() <-chan timeoutInfo {
    return m.c
}

//------------------------------------

func newCounter() abci.Application {
    return counter.NewCounterApplication(true)
}

func newPersistentDummy() abci.Application {
    dir, _ := ioutil.TempDir("/tmp", "persistent-dummy")
    return dummy.NewPersistentDummyApplication(dir)
}
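Aside: how the net constructor and the mock ticker are meant to combine, sketched under the same assumptions (the helper names are the ones defined above; the test name is illustrative).

    func TestNetSketch(t *testing.T) { // hypothetical, illustration only
        // four validators, counter app, tickers that fire once on NewHeight,
        // so each state advances only when the test feeds it messages
        css := randConsensusNet(4, "consensus_net_sketch", newMockTickerFunc(true), newCounter)
        for _, cs := range css {
            startTestRound(cs, cs.Height, 0)
        }
    }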
@@ -24,6 +24,8 @@ but which round is not known in advance, so when a peer
provides a precommit for a round greater than mtx.round,
we create a new entry in roundVoteSets but also remember the
peer to prevent abuse.
We let each peer provide us with up to 2 unexpected "catchup" rounds.
One for their LastCommit round, and another for the official commit round.
*/
type HeightVoteSet struct {
    chainID string
@@ -33,7 +35,7 @@ type HeightVoteSet struct {
    mtx               sync.Mutex
    round             int                  // max tracked round
    roundVoteSets     map[int]RoundVoteSet // keys: [0...round]
    peerCatchupRounds map[string]int       // keys: peer.Key; values: round
    peerCatchupRounds map[string][]int     // keys: peer.Key; values: at most 2 rounds
}

func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *HeightVoteSet {
@@ -51,7 +53,7 @@ func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) {
    hvs.height = height
    hvs.valSet = valSet
    hvs.roundVoteSets = make(map[int]RoundVoteSet)
    hvs.peerCatchupRounds = make(map[string]int)
    hvs.peerCatchupRounds = make(map[string][]int)

    hvs.addRound(0)
    hvs.round = 0
@@ -100,15 +102,18 @@ func (hvs *HeightVoteSet) addRound(round int) {

// Duplicate votes return added=false, err=nil.
// By convention, peerKey is "" if origin is self.
func (hvs *HeightVoteSet) AddByIndex(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
    hvs.mtx.Lock()
    defer hvs.mtx.Unlock()
    if !types.IsVoteTypeValid(vote.Type) {
        return
    }
    voteSet := hvs.getVoteSet(vote.Round, vote.Type)
    if voteSet == nil {
        if _, ok := hvs.peerCatchupRounds[peerKey]; !ok {
        if rndz := hvs.peerCatchupRounds[peerKey]; len(rndz) < 2 {
            hvs.addRound(vote.Round)
            voteSet = hvs.getVoteSet(vote.Round, vote.Type)
            hvs.peerCatchupRounds[peerKey] = vote.Round
            hvs.peerCatchupRounds[peerKey] = append(rndz, vote.Round)
        } else {
            // Peer has sent a vote that does not match our round,
            // for more than one round. Bad peer!
@@ -117,7 +122,7 @@ func (hvs *HeightVoteSet) AddByIndex(valIndex int, vote *types.Vote, peerKey str
            return
        }
    }
    added, address, err = voteSet.AddByIndex(valIndex, vote)
    added, err = voteSet.AddVote(vote)
    return
}
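Aside: the effect of the `map[string][]int` change, sketched in isolation (illustrative rounds, inside a test body; not part of the diff). A peer may open at most two rounds we are not tracking; a third unexpected round marks it as bad.

    peerCatchupRounds := make(map[string][]int)
    allow := func(peerKey string, round int) bool {
        rndz := peerCatchupRounds[peerKey]
        if len(rndz) < 2 {
            peerCatchupRounds[peerKey] = append(rndz, round)
            return true
        }
        return false // bad peer: too many catchup rounds
    }
    allow("peer1", 999)  // true  (e.g. their LastCommit round)
    allow("peer1", 1000) // true  (e.g. the official commit round)
    allow("peer1", 1001) // false (refused; compare TestPeerCatchupRounds below)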
@@ -133,17 +138,19 @@ func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet {
    return hvs.getVoteSet(round, types.VoteTypePrecommit)
}

// Last round that has +2/3 prevotes for a particular block or nil.
// Last round and blockID that has +2/3 prevotes for a particular block or nil.
// Returns -1 if no such round exists.
func (hvs *HeightVoteSet) POLRound() int {
func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) {
    hvs.mtx.Lock()
    defer hvs.mtx.Unlock()
    for r := hvs.round; r >= 0; r-- {
        if hvs.getVoteSet(r, types.VoteTypePrevote).HasTwoThirdsMajority() {
            return r
        rvs := hvs.getVoteSet(r, types.VoteTypePrevote)
        polBlockID, ok := rvs.TwoThirdsMajority()
        if ok {
            return r, polBlockID
        }
    }
    return -1
    return -1, types.BlockID{}
}
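Aside: the call-site change is mechanical; a hedged before/after sketch mirroring decideProposal above (cs, height, round, and blockParts assumed in scope).

    // before: only the round was available
    //   proposal = types.NewProposal(height, round, blockParts.Header(), cs.Votes.POLRound())
    // after: the round arrives with the majority BlockID, so the proposal carries both
    polRound, polBlockID := cs.Votes.POLInfo()
    proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)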
func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
@@ -167,6 +174,8 @@ func (hvs *HeightVoteSet) String() string {
}

func (hvs *HeightVoteSet) StringIndented(indent string) string {
    hvs.mtx.Lock()
    defer hvs.mtx.Unlock()
    vsStrings := make([]string, 0, (len(hvs.roundVoteSets)+1)*2)
    // rounds 0 ~ hvs.round inclusive
    for round := 0; round <= hvs.round; round++ {
@@ -192,3 +201,20 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
        indent, strings.Join(vsStrings, "\n"+indent+" "),
        indent)
}

// If a peer claims that it has 2/3 majority for given blockKey, call this.
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID string, blockID types.BlockID) {
    hvs.mtx.Lock()
    defer hvs.mtx.Unlock()
    if !types.IsVoteTypeValid(type_) {
        return
    }
    voteSet := hvs.getVoteSet(round, type_)
    if voteSet == nil {
        return
    }
    voteSet.SetPeerMaj23(peerID, blockID)
}
@@ -17,31 +17,40 @@ func TestPeerCatchupRounds(t *testing.T) {

    hvs := NewHeightVoteSet(config.GetString("chain_id"), 1, valSet)

    vote999_0 := makeVoteHR(t, 1, 999, privVals[0])
    added, _, err := hvs.AddByIndex(0, vote999_0, "peer1")
    vote999_0 := makeVoteHR(t, 1, 999, privVals, 0)
    added, err := hvs.AddVote(vote999_0, "peer1")
    if !added || err != nil {
        t.Error("Expected to successfully add vote from peer", added, err)
    }

    vote1000_0 := makeVoteHR(t, 1, 1000, privVals[0])
    added, _, err = hvs.AddByIndex(0, vote1000_0, "peer1")
    vote1000_0 := makeVoteHR(t, 1, 1000, privVals, 0)
    added, err = hvs.AddVote(vote1000_0, "peer1")
    if !added || err != nil {
        t.Error("Expected to successfully add vote from peer", added, err)
    }

    vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0)
    added, err = hvs.AddVote(vote1001_0, "peer1")
    if added {
        t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
    }

    added, _, err = hvs.AddByIndex(0, vote1000_0, "peer2")
    added, err = hvs.AddVote(vote1001_0, "peer2")
    if !added || err != nil {
        t.Error("Expected to successfully add vote from another peer")
    }

}

func makeVoteHR(t *testing.T, height, round int, privVal *types.PrivValidator) *types.Vote {
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidator, valIndex int) *types.Vote {
    privVal := privVals[valIndex]
    vote := &types.Vote{
        Height:    height,
        Round:     round,
        Type:      types.VoteTypePrecommit,
        BlockHash: []byte("fakehash"),
        ValidatorAddress: privVal.Address,
        ValidatorIndex:   valIndex,
        Height:           height,
        Round:            round,
        Type:             types.VoteTypePrecommit,
        BlockID:          types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
    }
    chainID := config.GetString("chain_id")
    err := privVal.SignVote(chainID, vote)
@@ -2,13 +2,12 @@ package consensus

import (
    "encoding/binary"
    // "math/rand"
    "testing"
    "time"

    "github.com/tendermint/tendermint/config/tendermint_test"
    "github.com/tendermint/tendermint/types"
    tmsp "github.com/tendermint/tmsp/types"
    abci "github.com/tendermint/abci/types"

    . "github.com/tendermint/go-common"
)
@@ -24,8 +23,8 @@ func TestTxConcurrentWithCommit(t *testing.T) {
    height, round := cs.Height, cs.Round
    newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)

    appendTxsRange := func(start, end int) {
        // Append some txs.
    deliverTxsRange := func(start, end int) {
        // Deliver some txs.
        for i := start; i < end; i++ {
            txBytes := make([]byte, 8)
            binary.BigEndian.PutUint64(txBytes, uint64(i))
@@ -38,7 +37,7 @@ func TestTxConcurrentWithCommit(t *testing.T) {
    }

    NTxs := 10000
    go appendTxsRange(0, NTxs)
    go deliverTxsRange(0, NTxs)

    startTestRound(cs, height, round)
    ticker := time.NewTicker(time.Second * 20)
@@ -52,6 +51,67 @@ func TestTxConcurrentWithCommit(t *testing.T) {
    }
}

func TestRmBadTx(t *testing.T) {
    state, privVals := randGenesisState(1, false, 10)
    app := NewCounterApplication()
    cs := newConsensusState(state, privVals[0], app)

    // increment the counter by 1
    txBytes := make([]byte, 8)
    binary.BigEndian.PutUint64(txBytes, uint64(0))
    app.DeliverTx(txBytes)
    app.Commit()

    ch := make(chan struct{})
    cbCh := make(chan struct{})
    go func() {
        // Try to send the tx through the mempool.
        // CheckTx should not err, but the app should return a bad abci code
        // and the tx should get removed from the pool
        err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) {
            if r.GetCheckTx().Code != abci.CodeType_BadNonce {
                t.Fatalf("expected checktx to return bad nonce, got %v", r)
            }
            cbCh <- struct{}{}
        })
        if err != nil {
            t.Fatalf("Error after CheckTx: %v", err)
        }

        // check for the tx
        for {
            time.Sleep(time.Second)
            select {
            case <-ch:
            default:
                txs := cs.mempool.Reap(1)
                if len(txs) == 0 {
                    ch <- struct{}{}
                }

            }
        }
    }()

    // Wait until the tx returns
    ticker := time.After(time.Second * 5)
    select {
    case <-cbCh:
        // success
    case <-ticker:
        t.Fatalf("Timed out waiting for tx to return")
    }

    // Wait until the tx is removed
    ticker = time.After(time.Second * 5)
    select {
    case <-ch:
        // success
    case <-ticker:
        t.Fatalf("Timed out waiting for tx to be removed")
    }
}

// CounterApplication that maintains a mempool state and resets it upon commit
type CounterApplication struct {
    txCount int
@@ -62,49 +122,45 @@ func NewCounterApplication() *CounterApplication {
    return &CounterApplication{}
}

func (app *CounterApplication) Info() string {
    return Fmt("txs:%v", app.txCount)
func (app *CounterApplication) Info() abci.ResponseInfo {
    return abci.ResponseInfo{Data: Fmt("txs:%v", app.txCount)}
}

func (app *CounterApplication) SetOption(key string, value string) (log string) {
    return ""
}

func (app *CounterApplication) AppendTx(tx []byte) tmsp.Result {
func (app *CounterApplication) DeliverTx(tx []byte) abci.Result {
    return runTx(tx, &app.txCount)
}

func (app *CounterApplication) CheckTx(tx []byte) tmsp.Result {
func (app *CounterApplication) CheckTx(tx []byte) abci.Result {
    return runTx(tx, &app.mempoolTxCount)
}

func runTx(tx []byte, countPtr *int) tmsp.Result {
func runTx(tx []byte, countPtr *int) abci.Result {
    count := *countPtr
    tx8 := make([]byte, 8)
    copy(tx8[len(tx8)-len(tx):], tx)
    txValue := binary.BigEndian.Uint64(tx8)
    if txValue != uint64(count) {
        return tmsp.Result{
            Code: tmsp.CodeType_BadNonce,
            Data: nil,
            Log:  Fmt("Invalid nonce. Expected %v, got %v", count, txValue),
        }
        return abci.ErrBadNonce.AppendLog(Fmt("Invalid nonce. Expected %v, got %v", count, txValue))
    }
    *countPtr += 1
    return tmsp.OK
    return abci.OK
}

func (app *CounterApplication) Commit() tmsp.Result {
func (app *CounterApplication) Commit() abci.Result {
    app.mempoolTxCount = app.txCount
    if app.txCount == 0 {
        return tmsp.OK
        return abci.OK
    } else {
        hash := make([]byte, 8)
        binary.BigEndian.PutUint64(hash, uint64(app.txCount))
        return tmsp.NewResultOK(hash, "")
        return abci.NewResultOK(hash, "")
    }
}

func (app *CounterApplication) Query(query []byte) tmsp.Result {
    return tmsp.NewResultOK(nil, Fmt("Query is not supported"))
func (app *CounterApplication) Query(query []byte) abci.Result {
    return abci.NewResultOK(nil, Fmt("Query is not supported"))
}
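Aside: the counter app's txs are 8-byte big-endian nonces, which is why runTx right-aligns short txs before decoding; an illustrative round trip (not part of the diff):

    tx := make([]byte, 8)
    binary.BigEndian.PutUint64(tx, 42) // nonce 42 as 8 big-endian bytes

    short := []byte{0x2a}                  // the same nonce, sent as a 1-byte tx
    tx8 := make([]byte, 8)
    copy(tx8[len(tx8)-len(short):], short) // right-align into 8 bytes
    _ = binary.BigEndian.Uint64(tx8)       // == 42, matches the counter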
@@ -9,39 +9,37 @@ import (
    "time"

    . "github.com/tendermint/go-common"
    "github.com/tendermint/go-events"
    "github.com/tendermint/go-p2p"
    "github.com/tendermint/go-wire"
    bc "github.com/tendermint/tendermint/blockchain"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

const (
    StateChannel = byte(0x20)
    DataChannel  = byte(0x21)
    VoteChannel  = byte(0x22)
    StateChannel       = byte(0x20)
    DataChannel        = byte(0x21)
    VoteChannel        = byte(0x22)
    VoteSetBitsChannel = byte(0x23)

    peerGossipSleepDuration = 100 * time.Millisecond // Time to sleep if there's nothing to send.
    maxConsensusMessageSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes.
    peerGossipSleepDuration     = 100 * time.Millisecond // Time to sleep if there's nothing to send.
    peerQueryMaj23SleepDuration = 2 * time.Second        // Time to sleep after each VoteSetMaj23Message sent
    maxConsensusMessageSize     = 1048576                // 1MB; NOTE: keep in sync with types.PartSet sizes.
)

//-----------------------------------------------------------------------------

type ConsensusReactor struct {
    p2p.BaseReactor // QuitService + p2p.Switch
    p2p.BaseReactor // BaseService + p2p.Switch

    blockStore *bc.BlockStore
    conS       *ConsensusState
    fastSync   bool
    evsw       *events.EventSwitch
    conS     *ConsensusState
    fastSync bool
    evsw     types.EventSwitch
}

func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockStore, fastSync bool) *ConsensusReactor {
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
    conR := &ConsensusReactor{
        blockStore: blockStore,
        conS:       consensusState,
        fastSync:   fastSync,
        conS:     consensusState,
        fastSync: fastSync,
    }
    conR.BaseReactor = *p2p.NewBaseReactor(log, "ConsensusReactor", conR)
    return conR
@@ -102,6 +100,12 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
            SendQueueCapacity:  100,
            RecvBufferCapacity: 100 * 100,
        },
        &p2p.ChannelDescriptor{
            ID:                 VoteSetBitsChannel,
            Priority:           1,
            SendQueueCapacity:  2,
            RecvBufferCapacity: 1024,
        },
    }
}

@@ -115,9 +119,10 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
    peerState := NewPeerState(peer)
    peer.Data.Set(types.PeerStateKey, peerState)

    // Begin gossip routines for this peer.
    // Begin routines for this peer.
    go conR.gossipDataRoutine(peer, peerState)
    go conR.gossipVotesRoutine(peer, peerState)
    go conR.queryMaj23Routine(peer, peerState)

    // Send our state to peer.
    // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
@@ -153,7 +158,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
        // TODO punish peer?
        return
    }
    log.Info("Receive", "src", src, "chId", chID, "msg", msg)
    log.Debug("Receive", "src", src, "chId", chID, "msg", msg)

    // Get peer states
    ps := src.Data.Get(types.PeerStateKey).(*PeerState)
@@ -167,6 +172,36 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
            ps.ApplyCommitStepMessage(msg)
        case *HasVoteMessage:
            ps.ApplyHasVoteMessage(msg)
        case *VoteSetMaj23Message:
            cs := conR.conS
            cs.mtx.Lock()
            height, votes := cs.Height, cs.Votes
            cs.mtx.Unlock()
            if height != msg.Height {
                return
            }
            // Peer claims to have a maj23 for some BlockID at H,R,S,
            votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.Key, msg.BlockID)
            // Respond with a VoteSetBitsMessage showing which votes we have.
            // (and consequently shows which we don't have)
            var ourVotes *BitArray
            switch msg.Type {
            case types.VoteTypePrevote:
                ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
            case types.VoteTypePrecommit:
                ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
            default:
                log.Warn("Bad VoteSetBitsMessage field Type")
                return
            }
            src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{
                Height:  msg.Height,
                Round:   msg.Round,
                Type:    msg.Type,
                BlockID: msg.BlockID,
                Votes:   ourVotes,
            }})

        default:
            log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
        }
@@ -202,7 +237,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
            cs.mtx.Unlock()
            ps.EnsureVoteBitArrays(height, valSize)
            ps.EnsureVoteBitArrays(height-1, lastCommitSize)
            ps.SetHasVote(msg.Vote, msg.ValidatorIndex)
            ps.SetHasVote(msg.Vote)

            conR.conS.peerMsgQueue <- msgInfo{msg, src.Key}

@@ -210,6 +245,39 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
            // don't punish (leave room for soft upgrades)
            log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
        }

    case VoteSetBitsChannel:
        if conR.fastSync {
            log.Warn("Ignoring message received during fastSync", "msg", msg)
            return
        }
        switch msg := msg.(type) {
        case *VoteSetBitsMessage:
            cs := conR.conS
            cs.mtx.Lock()
            height, votes := cs.Height, cs.Votes
            cs.mtx.Unlock()

            if height == msg.Height {
                var ourVotes *BitArray
                switch msg.Type {
                case types.VoteTypePrevote:
                    ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
                case types.VoteTypePrecommit:
                    ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
                default:
                    log.Warn("Bad VoteSetBitsMessage field Type")
                    return
                }
                ps.ApplyVoteSetBitsMessage(msg, ourVotes)
            } else {
                ps.ApplyVoteSetBitsMessage(msg, nil)
            }
        default:
            // don't punish (leave room for soft upgrades)
            log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
        }

    default:
        log.Warn(Fmt("Unknown chId %X", chID))
    }
@@ -219,13 +287,8 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
    }
}
// Sets our private validator account for signing votes.
func (conR *ConsensusReactor) SetPrivValidator(priv *types.PrivValidator) {
    conR.conS.SetPrivValidator(priv)
}

// implements events.Eventable
func (conR *ConsensusReactor) SetEventSwitch(evsw *events.EventSwitch) {
func (conR *ConsensusReactor) SetEventSwitch(evsw types.EventSwitch) {
    conR.evsw = evsw
    conR.conS.SetEventSwitch(evsw)
}
@@ -236,14 +299,14 @@ func (conR *ConsensusReactor) SetEventSwitch(evsw *events.EventSwitch) {
// broadcasting the result to peers
func (conR *ConsensusReactor) registerEventCallbacks() {

    conR.evsw.AddListenerForEvent("conR", types.EventStringNewRoundStep(), func(data events.EventData) {
    types.AddListenerForEvent(conR.evsw, "conR", types.EventStringNewRoundStep(), func(data types.TMEventData) {
        rs := data.(types.EventDataRoundState).RoundState.(*RoundState)
        conR.broadcastNewRoundStep(rs)
    })

    conR.evsw.AddListenerForEvent("conR", types.EventStringVote(), func(data events.EventData) {
    types.AddListenerForEvent(conR.evsw, "conR", types.EventStringVote(), func(data types.TMEventData) {
        edv := data.(types.EventDataVote)
        conR.broadcastHasVoteMessage(edv.Vote, edv.Index)
        conR.broadcastHasVoteMessage(edv.Vote)
    })
}

@@ -259,12 +322,12 @@ func (conR *ConsensusReactor) broadcastNewRoundStep(rs *RoundState) {
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote, index int) {
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
    msg := &HasVoteMessage{
        Height: vote.Height,
        Round:  vote.Round,
        Type:   vote.Type,
        Index:  index,
        Index:  vote.ValidatorIndex,
    }
    conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
    /*
@@ -347,15 +410,19 @@ OUTER_LOOP:
        //log.Info("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockParts", prs.ProposalBlockParts)
        if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
            // Ensure that the peer's PartSetHeader is correct
            blockMeta := conR.blockStore.LoadBlockMeta(prs.Height)
            if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
            blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
            if blockMeta == nil {
                log.Warn("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
                time.Sleep(peerGossipSleepDuration)
                continue OUTER_LOOP
            } else if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
                log.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
                    "peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
                time.Sleep(peerGossipSleepDuration)
                continue OUTER_LOOP
            }
            // Load the part
            part := conR.blockStore.LoadBlockPart(prs.Height, index)
            part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
            if part == nil {
                log.Warn("Could not load part", "index", index,
                    "peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
@@ -392,13 +459,13 @@ OUTER_LOOP:

        // Send Proposal && ProposalPOL BitArray?
        if rs.Proposal != nil && !prs.Proposal {
            // Proposal
            // Proposal: share the proposal metadata with peer.
            {
                msg := &ProposalMessage{Proposal: rs.Proposal}
                peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
                ps.SetHasProposal(rs.Proposal)
            }
            // ProposalPOL.
            // ProposalPOL: lets peer know which POL votes we have so far.
            // Peer must receive ProposalMessage first.
            // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
            // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
@@ -450,21 +517,21 @@ OUTER_LOOP:
        // If there are lastCommits to send...
        if prs.Step == RoundStepNewHeight {
            if ps.PickSendVote(rs.LastCommit) {
                log.Info("Picked rs.LastCommit to send")
                log.Debug("Picked rs.LastCommit to send")
                continue OUTER_LOOP
            }
        }
        // If there are prevotes to send...
        if prs.Step <= RoundStepPrevote && prs.Round != -1 && prs.Round <= rs.Round {
            if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
                log.Info("Picked rs.Prevotes(prs.Round) to send")
                log.Debug("Picked rs.Prevotes(prs.Round) to send")
                continue OUTER_LOOP
            }
        }
        // If there are precommits to send...
        if prs.Step <= RoundStepPrecommit && prs.Round != -1 && prs.Round <= rs.Round {
            if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
                log.Info("Picked rs.Precommits(prs.Round) to send")
                log.Debug("Picked rs.Precommits(prs.Round) to send")
                continue OUTER_LOOP
            }
        }
@@ -472,7 +539,7 @@ OUTER_LOOP:
        if prs.ProposalPOLRound != -1 {
            if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
                if ps.PickSendVote(polPrevotes) {
                    log.Info("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
                    log.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
                    continue OUTER_LOOP
                }
            }
@@ -483,7 +550,7 @@ OUTER_LOOP:
        // If peer is lagging by height 1, send LastCommit.
        if prs.Height != 0 && rs.Height == prs.Height+1 {
            if ps.PickSendVote(rs.LastCommit) {
                log.Info("Picked rs.LastCommit to send")
                log.Debug("Picked rs.LastCommit to send")
                continue OUTER_LOOP
            }
        }
@@ -493,10 +560,10 @@ OUTER_LOOP:
        if prs.Height != 0 && rs.Height >= prs.Height+2 {
            // Load the block commit for prs.Height,
            // which contains precommit signatures for prs.Height.
            commit := conR.blockStore.LoadBlockCommit(prs.Height)
            commit := conR.conS.blockStore.LoadBlockCommit(prs.Height)
            log.Info("Loaded BlockCommit for catch-up", "height", prs.Height, "commit", commit)
            if ps.PickSendVote(commit) {
                log.Info("Picked Catchup commit to send")
                log.Debug("Picked Catchup commit to send")
                continue OUTER_LOOP
            }
        }
@@ -504,7 +571,7 @@ OUTER_LOOP:
        if sleeping == 0 {
            // We sent nothing. Sleep...
            sleeping = 1
            log.Info("No votes to send, sleeping", "peer", peer,
            log.Debug("No votes to send, sleeping", "peer", peer,
                "localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
                "localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
        } else if sleeping == 2 {
@@ -517,6 +584,110 @@ OUTER_LOOP:
    }
}

// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *ConsensusReactor) queryMaj23Routine(peer *p2p.Peer, ps *PeerState) {
    log := log.New("peer", peer)

OUTER_LOOP:
    for {
        // Manage disconnects from self or peer.
        if !peer.IsRunning() || !conR.IsRunning() {
            log.Notice(Fmt("Stopping queryMaj23Routine for %v.", peer))
            return
        }

        // Maybe send Height/Round/Prevotes
        {
            rs := conR.conS.GetRoundState()
            prs := ps.GetRoundState()
            if rs.Height == prs.Height {
                if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
                    peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
                        Height:  prs.Height,
                        Round:   prs.Round,
                        Type:    types.VoteTypePrevote,
                        BlockID: maj23,
                    }})
                    time.Sleep(peerQueryMaj23SleepDuration)
                }
            }
        }

        // Maybe send Height/Round/Precommits
        {
            rs := conR.conS.GetRoundState()
            prs := ps.GetRoundState()
            if rs.Height == prs.Height {
                if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
                    peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
                        Height:  prs.Height,
                        Round:   prs.Round,
                        Type:    types.VoteTypePrecommit,
                        BlockID: maj23,
                    }})
                    time.Sleep(peerQueryMaj23SleepDuration)
                }
            }
        }

        // Maybe send Height/Round/ProposalPOL
        {
            rs := conR.conS.GetRoundState()
            prs := ps.GetRoundState()
            if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
                if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
                    peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
                        Height:  prs.Height,
                        Round:   prs.ProposalPOLRound,
                        Type:    types.VoteTypePrevote,
                        BlockID: maj23,
                    }})
                    time.Sleep(peerQueryMaj23SleepDuration)
                }
            }
        }

        // Little point sending LastCommitRound/LastCommit,
        // These are fleeting and non-blocking.

        // Maybe send Height/CatchupCommitRound/CatchupCommit.
        {
            prs := ps.GetRoundState()
            if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
                commit := conR.conS.LoadCommit(prs.Height)
                peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
                    Height:  prs.Height,
                    Round:   commit.Round(),
                    Type:    types.VoteTypePrecommit,
                    BlockID: commit.BlockID,
                }})
                time.Sleep(peerQueryMaj23SleepDuration)
            }
        }

        time.Sleep(peerQueryMaj23SleepDuration)

        continue OUTER_LOOP
    }
}
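Aside: reading the two halves together, the liveness round trip is: queryMaj23Routine claims a majority on StateChannel, and the receiver answers on VoteSetBitsChannel with the bit-vector of votes it already holds, so the gossip routines can send exactly the missing signatures. A hedged sketch of the claim message (fields as registered later in this file; prs, maj23, and peer assumed in scope):

    peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
        Height:  prs.Height,
        Round:   prs.Round,
        Type:    types.VoteTypePrevote,
        BlockID: maj23, // the BlockID we saw +2/3 prevotes for
    }})
    // the peer replies with a VoteSetBitsMessage; see the VoteSetBitsChannel
    // case in Receive above and ApplyVoteSetBitsMessage below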
func (conR *ConsensusReactor) String() string {
    // better not to access shared variables
    return "ConsensusReactor" // conR.StringIndented("")
}

func (conR *ConsensusReactor) StringIndented(indent string) string {
    s := "ConsensusReactor{\n"
    s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
    for _, peer := range conR.Switch.Peers().List() {
        ps := peer.Data.Get(types.PeerStateKey).(*PeerState)
        s += indent + " " + ps.StringIndented(indent+" ") + "\n"
    }
    s += indent + "}"
    return s
}

//-----------------------------------------------------------------------------

// Read only when returned by PeerState.GetRoundState().
@@ -538,6 +709,30 @@ type PeerRoundState struct {
    CatchupCommit *BitArray // All commit precommits peer has for this height & CatchupCommitRound
}

func (prs PeerRoundState) String() string {
    return prs.StringIndented("")
}

func (prs PeerRoundState) StringIndented(indent string) string {
    return fmt.Sprintf(`PeerRoundState{
%s %v/%v/%v @%v
%s Proposal %v -> %v
%s POL %v (round %v)
%s Prevotes %v
%s Precommits %v
%s LastCommit %v (round %v)
%s Catchup %v (round %v)
%s}`,
        indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
        indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
        indent, prs.ProposalPOL, prs.ProposalPOLRound,
        indent, prs.Prevotes,
        indent, prs.Precommits,
        indent, prs.LastCommit, prs.LastCommitRound,
        indent, prs.CatchupCommit, prs.CatchupCommitRound,
        indent)
}

//-----------------------------------------------------------------------------

var (
@@ -614,8 +809,8 @@ func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
// Convenience function to send vote to peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
    if index, vote, ok := ps.PickVoteToSend(votes); ok {
        msg := &VoteMessage{index, vote}
    if vote, ok := ps.PickVoteToSend(votes); ok {
        msg := &VoteMessage{vote}
        ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
        return true
    }
@@ -623,12 +818,12 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
}

// votes: Must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote *types.Vote, ok bool) {
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    if votes.Size() == 0 {
        return 0, nil, false
        return nil, false
    }

    height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size()
@@ -641,16 +836,20 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote

    psVotes := ps.getVoteBitArray(height, round, type_)
    if psVotes == nil {
        return 0, nil, false // Not something worth sending
        return nil, false // Not something worth sending
    }
    if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
        ps.setHasVote(height, round, type_, index)
        return index, votes.GetByIndex(index), true
        return votes.GetByIndex(index), true
    }
    return 0, nil, false
    return nil, false
}
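Aside: the core of PickVoteToSend is set subtraction over bit-arrays plus a random pick; a hedged standalone sketch (not in the diff) using only methods that appear above (Sub, PickRandom, GetByIndex):

    // pick a random vote we hold that the peer, by our accounting, does not
    func pickMissingVote(votes types.VoteSetReader, psVotes *BitArray) (*types.Vote, bool) {
        if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
            return votes.GetByIndex(index), true
        }
        return nil, false
    }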
func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
if !types.IsVoteTypeValid(type_) {
PanicSanity("Invalid vote type")
}

if ps.Height == height {
if ps.Round == round {
switch type_ {
@@ -658,8 +857,6 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return ps.Prevotes
case types.VoteTypePrecommit:
return ps.Precommits
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
if ps.CatchupCommitRound == round {
@@ -668,8 +865,14 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return nil
case types.VoteTypePrecommit:
return ps.CatchupCommit
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
if ps.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
return ps.ProposalPOL
case types.VoteTypePrecommit:
return nil
}
}
return nil
@@ -681,8 +884,6 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return nil
case types.VoteTypePrecommit:
return ps.LastCommit
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
return nil
@@ -742,56 +943,19 @@ func (ps *PeerState) ensureVoteBitArrays(height int, numValidators int) {
}
}

func (ps *PeerState) SetHasVote(vote *types.Vote, index int) {
func (ps *PeerState) SetHasVote(vote *types.Vote) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

ps.setHasVote(vote.Height, vote.Round, vote.Type, index)
ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}

func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
log := log.New("peer", ps.Peer, "peerRound", ps.Round, "height", height, "round", round)
if type_ != types.VoteTypePrevote && type_ != types.VoteTypePrecommit {
PanicSanity("Invalid vote type")
}
log.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)

if ps.Height == height {
if ps.Round == round {
switch type_ {
case types.VoteTypePrevote:
ps.Prevotes.SetIndex(index, true)
log.Info("SetHasVote(round-match)", "prevotes", ps.Prevotes, "index", index)
case types.VoteTypePrecommit:
ps.Precommits.SetIndex(index, true)
log.Info("SetHasVote(round-match)", "precommits", ps.Precommits, "index", index)
}
} else if ps.CatchupCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.VoteTypePrecommit:
ps.CatchupCommit.SetIndex(index, true)
log.Info("SetHasVote(CatchupCommit)", "precommits", ps.Precommits, "index", index)
}
} else if ps.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
ps.ProposalPOL.SetIndex(index, true)
log.Info("SetHasVote(ProposalPOL)", "prevotes", ps.Prevotes, "index", index)
case types.VoteTypePrecommit:
}
}
} else if ps.Height == height+1 {
if ps.LastCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.VoteTypePrecommit:
ps.LastCommit.SetIndex(index, true)
log.Info("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
}
}
} else {
// Does not apply.
}
// NOTE: some may be nil BitArrays -> no side effects.
ps.getVoteBitArray(height, round, type_).SetIndex(index, true)
}
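The final line of the new setHasVote works even when getVoteBitArray returns nil because, as the NOTE above says, the BitArray methods tolerate a nil receiver. A minimal sketch of that nil-receiver pattern, using a toy bitArray type rather than the real go-common implementation:

```go
package main

import "fmt"

// bitArray is a toy stand-in for go-common's BitArray. Methods on a
// nil *bitArray are safe no-ops, which is what lets callers write
// getVoteBitArray(...).SetIndex(i, true) without a nil check.
type bitArray struct{ bits []bool }

func (ba *bitArray) SetIndex(i int, v bool) bool {
	if ba == nil || i < 0 || i >= len(ba.bits) { // nil receiver: no side effects
		return false
	}
	ba.bits[i] = v
	return true
}

func main() {
	var ba *bitArray // nil, e.g. no bit array exists for this height/round/type
	fmt.Println(ba.SetIndex(0, true)) // prints false, and does not panic
}
```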

func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
@@ -859,17 +1023,6 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
ps.ProposalBlockParts = msg.BlockParts
}

func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

if ps.Height != msg.Height {
return
}

ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@@ -886,6 +1039,52 @@ func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
ps.ProposalPOL = msg.ProposalPOL
}

func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

if ps.Height != msg.Height {
return
}

ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

// The peer has responded with a bitarray of votes that it has
// of the corresponding BlockID.
// ourVotes: BitArray of votes we have for msg.BlockID
// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
// we conservatively overwrite ps's votes w/ msg.Votes.
func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *BitArray) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
if votes != nil {
if ourVotes == nil {
votes.Update(msg.Votes)
} else {
otherVotes := votes.Sub(ourVotes)
hasVotes := otherVotes.Or(msg.Votes)
votes.Update(hasVotes)
}
}
}
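The merge above keeps any bits we already attributed to the peer beyond our own vote set (votes.Sub(ourVotes)) and then ORs in the bits the peer claims. A rough sketch of that set algebra over plain bool slices (toy code mirroring the arithmetic, not the go-common BitArray API):

```go
package main

import "fmt"

// sub returns bits set in a but not in b; or returns the union.
func sub(a, b []bool) []bool {
	out := make([]bool, len(a))
	for i := range a {
		out[i] = a[i] && !b[i]
	}
	return out
}

func or(a, b []bool) []bool {
	out := make([]bool, len(a))
	for i := range a {
		out[i] = a[i] || b[i]
	}
	return out
}

func main() {
	peerVotes := []bool{true, true, false, false} // what we think the peer has
	ourVotes := []bool{true, false, false, false} // what we have for msg.BlockID
	msgVotes := []bool{false, false, true, false} // what the peer now claims

	// Mirrors ApplyVoteSetBitsMessage: keep peer bits outside our own set,
	// then OR in the peer's claimed bits, and treat the result as the
	// peer's new vote bit array.
	merged := or(sub(peerVotes, ourVotes), msgVotes)
	fmt.Println(merged) // [false true true false]
}
```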

func (ps *PeerState) String() string {
return ps.StringIndented("")
}

func (ps *PeerState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerState{
%s  Key %v
%s  PRS %v
%s}`,
indent, ps.Peer.Key,
indent, ps.PeerRoundState.StringIndented(indent+"  "),
indent)
}

//-----------------------------------------------------------------------------
// Messages

@@ -897,6 +1096,8 @@ const (
msgTypeBlockPart = byte(0x13) // both block & POL
msgTypeVote = byte(0x14)
msgTypeHasVote = byte(0x15)
msgTypeVoteSetMaj23 = byte(0x16)
msgTypeVoteSetBits = byte(0x17)
)

type ConsensusMessage interface{}
@@ -910,6 +1111,8 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart},
wire.ConcreteType{&VoteMessage{}, msgTypeVote},
wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote},
wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23},
wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits},
)

// TODO: check for unnecessary extra bytes at the end.
@@ -986,12 +1189,11 @@ func (m *BlockPartMessage) String() string {
//-------------------------------------

type VoteMessage struct {
ValidatorIndex int
Vote *types.Vote
Vote *types.Vote
}

func (m *VoteMessage) String() string {
return fmt.Sprintf("[Vote VI:%v V:%v VI:%v]", m.ValidatorIndex, m.Vote, m.ValidatorIndex)
return fmt.Sprintf("[Vote %v]", m.Vote)
}

//-------------------------------------
@@ -1006,3 +1208,30 @@ type HasVoteMessage struct {
func (m *HasVoteMessage) String() string {
return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v} VI:%v]", m.Index, m.Height, m.Round, m.Type, m.Index)
}

//-------------------------------------

type VoteSetMaj23Message struct {
Height int
Round int
Type byte
BlockID types.BlockID
}

func (m *VoteSetMaj23Message) String() string {
return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
}

//-------------------------------------

type VoteSetBitsMessage struct {
Height int
Round int
Type byte
BlockID types.BlockID
Votes *BitArray
}

func (m *VoteSetBitsMessage) String() string {
return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
}
consensus/reactor_test.go (new file, 309 lines)
@@ -0,0 +1,309 @@
package consensus

import (
"fmt"
"sync"
"testing"
"time"

"github.com/tendermint/tendermint/config/tendermint_test"

"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
)

func init() {
config = tendermint_test.ResetConfig("consensus_reactor_test")
}

//----------------------------------------------
// in-process testnets

func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) {
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states

eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}

reactors[i].SetEventSwitch(eventSwitch)
if subscribeEventRespond {
eventChans[i] = subscribeToEventRespond(eventSwitch, "tester", types.EventStringNewBlock())
} else {
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
}
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("CONSENSUS", reactors[i])
return s
}, p2p.Connect2Switches)

// now that everyone is connected, start the state machines.
// If we started the state machines before everyone was connected,
// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
for i := 0; i < N; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s)
}
return reactors, eventChans
}

func stopConsensusNet(reactors []*ConsensusReactor) {
for _, r := range reactors {
r.Switch.Stop()
}
}

// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
reactors, eventChans := startConsensusNet(t, css, N, false)
defer stopConsensusNet(reactors)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
wg.Done()
}, css)
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

func TestVotingPowerChange(t *testing.T) {
nVals := 4
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nVals, true)
defer stopConsensusNet(reactors)

// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
}

// wait till everyone makes block 1
timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)

//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator a few times")

val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)

if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}

updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)

if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}

updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 100)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)

if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
}

func TestValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nPeers, true)
defer stopConsensusNet(reactors)

// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
}

// wait till everyone makes block 1
timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)

//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding one validator")

newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))

// wait till everyone makes block 2
// ensure the commit includes all validators
// send newValTx to change vals in block 3
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1)

// wait till everyone makes block 3.
// it includes the commit for block 2, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)

// wait till everyone makes block 4.
// it includes the commit for block 3, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)

// the commits for block 4 should be with the updated validator set
activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

// wait till everyone makes block 5
// it includes the commit for block 4, which should have the updated validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)

//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator")

updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)

if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
}

//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding two validators at once")

newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))

newValidatorPubKey3 := css[nVals+2].privValidator.(*types.PrivValidator).PubKey
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower))

waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)

//---------------------------------------------------------------------------
log.Info("---------------------------- Testing removing two validators at once")

removeValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
removeValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)

waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
delete(activeVals, string(newValidatorPubKey2.Address()))
delete(activeVals, string(newValidatorPubKey3.Address()))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
}

// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
css[i].timeoutParams.SkipTimeoutCommit = false
}

reactors, eventChans := startConsensusNet(t, css, N-1, false)
defer stopConsensusNet(reactors)

// wait till everyone makes the first new block
timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
wg.Done()
}, css)
}

func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
newBlockI := <-eventChans[j]
newBlock := newBlockI.(types.EventDataNewBlock).Block
log.Warn("Got block", "height", newBlock.Height, "validator", j)
err := validateBlock(newBlock, activeVals)
if err != nil {
t.Fatal(err)
}
for _, tx := range txs {
css[j].mempool.CheckTx(tx, nil)
}

eventChans[j] <- struct{}{}
wg.Done()
log.Warn("Done wait group", "height", newBlock.Height, "validator", j)
}, css)
}

// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
if block.LastCommit.Size() != len(activeVals) {
return fmt.Errorf("Commit size doesn't match number of active validators. Got %d, expected %d", block.LastCommit.Size(), len(activeVals))
}

for _, vote := range block.LastCommit.Precommits {
if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok {
return fmt.Errorf("Found vote for inactive validator %X", vote.ValidatorAddress)
}
}
return nil
}

func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*ConsensusState) {
wg := new(sync.WaitGroup)
wg.Add(n)
for i := 0; i < n; i++ {
go f(wg, i)
}

done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()

select {
case <-done:
case <-time.After(time.Second * 10):
for i, cs := range css {
fmt.Println("#################")
fmt.Println("Validator", i)
fmt.Println(cs.GetRoundState())
fmt.Println("")
}
panic("Timed out waiting for all validators to commit a block")
}
}
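timeoutWaitGroup above is the stock Go idiom for bounding a WaitGroup with a deadline: wait in a goroutine, close a channel on completion, and select against time.After. Stripped of the consensus specifics, the pattern reduces to this sketch:

```go
package main

import (
	"errors"
	"sync"
	"time"
)

// waitTimeout runs f n times concurrently and fails if the whole
// group doesn't finish within d. Same shape as timeoutWaitGroup above.
func waitTimeout(n int, d time.Duration, f func(i int)) error {
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(i int) { defer wg.Done(); f(i) }(i)
	}

	// Wait in a goroutine so we can race the WaitGroup against a timer.
	done := make(chan struct{})
	go func() { wg.Wait(); close(done) }()

	select {
	case <-done:
		return nil
	case <-time.After(d):
		return errors.New("timed out waiting for goroutines")
	}
}

func main() {
	_ = waitTimeout(4, time.Second, func(i int) { time.Sleep(10 * time.Millisecond) })
}
```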

@@ -11,6 +11,7 @@ import (
"strings"
"time"

auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"

@@ -18,12 +19,17 @@ import (
"github.com/tendermint/tendermint/types"
)

// unmarshal and apply a single message to the consensus state
// Unmarshal and apply a single message to the consensus state
// as if it were received in receiveRoutine
// Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running
func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan interface{}) error {
// Skip over empty and meta lines
if len(msgBytes) == 0 || msgBytes[0] == '#' {
return nil
}
var err error
var msg ConsensusLogMessage
var msg TimedWALMessage
wire.ReadJSON(&msg, msgBytes, &err)
if err != nil {
fmt.Println("MsgBytes:", msgBytes, string(msgBytes))
@@ -62,7 +68,7 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
case *VoteMessage:
v := msg.Vote
log.Notice("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
"hash", v.BlockHash, "header", v.BlockPartsHeader, "peer", peerKey)
"blockID", v.BlockID, "peer", peerKey)
}

cs.handleMsg(m, cs.RoundState)
@@ -70,77 +76,60 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
log.Notice("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
cs.handleTimeout(m, cs.RoundState)
default:
return fmt.Errorf("Replay: Unknown ConsensusLogMessage type: %v", reflect.TypeOf(msg.Msg))
return fmt.Errorf("Replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg))
}
return nil
}

// replay only those messages since the last block.
// timeoutRoutine should run concurrently to read off tickChan
func (cs *ConsensusState) catchupReplay(height int) error {
if !cs.wal.Exists() {
return nil
}
func (cs *ConsensusState) catchupReplay(csHeight int) error {

// set replayMode
cs.replayMode = true
defer func() { cs.replayMode = false }()

// starting from end of file,
// read messages until a new height is found
nLines, err := cs.wal.SeekFromEnd(func(lineBytes []byte) bool {
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, lineBytes, &err)
if err != nil {
panic(Fmt("Failed to read cs_msg_log json: %v", err))
}
m, ok := msg.Msg.(types.EventDataRoundState)
if ok && m.Step == RoundStepNewHeight.String() {
// TODO: ensure the height matches
return true
}
return false
})
// Ensure that height+1 doesn't exist
gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight+1))
if found {
return errors.New(Fmt("WAL should not contain height %d.", csHeight+1))
}
if gr != nil {
gr.Close()
}

if err != nil {
// Search for height marker
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "height", csHeight)
return nil
} else if err != nil {
return err
}

var beginning bool // if we had to go back to the beginning
if c, _ := cs.wal.fp.Seek(0, 1); c == 0 {
beginning = true
if !found {
return errors.New(Fmt("WAL does not contain height %d.", csHeight))
}
defer gr.Close()

log.Notice("Catchup by replaying consensus messages", "n", nLines)
log.Notice("Catchup by replaying consensus messages", "height", csHeight)

// now we can replay the latest nLines on consensus state
// note we can't use scan because we've already been reading from the file
reader := bufio.NewReader(cs.wal.fp)
for i := 0; i < nLines; i++ {
msgBytes, err := reader.ReadBytes('\n')
if err == io.EOF {
break
} else if err != nil {
return err
} else if len(msgBytes) == 0 {
continue
} else if len(msgBytes) == 1 && msgBytes[0] == '\n' {
continue
for {
line, err := gr.ReadLine()
if err != nil {
if err == io.EOF {
break
} else {
return err
}
}
// the first msg is the NewHeight event (if we're not at the beginning), so we can ignore it
if !beginning && i == 1 {
continue
}

// NOTE: since the priv key is set when the msgs are received
// it will attempt to eg double sign but we can just ignore it
// since the votes will be replayed and we'll get to the next step
if err := cs.readReplayMessage(msgBytes, nil); err != nil {
if err := cs.readReplayMessage([]byte(line), nil); err != nil {
return err
}
}
log.Notice("Done catchup replay")
log.Notice("Replay: Done")
return nil
}

@@ -227,6 +216,7 @@ func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {

pb.cs.Stop()
pb.cs.Wait()

newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool)
newCS.SetEventSwitch(pb.cs.evsw)
@@ -256,16 +246,18 @@ func (cs *ConsensusState) startForReplay() {
// don't want to start full cs
cs.BaseService.OnStart()

log.Warn("Replay commands are disabled until someone updates them and writes tests")
/* TODO:!
// since we replay tocks we just ignore ticks
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
}
}
}
}()
}()*/
}

// console function for parsing input and running commands
@@ -358,3 +350,28 @@ func (pb *playback) replayConsoleLoop() int {
}
return 0
}

//--------------------------------------------------------------------------------

// Parses marker lines of the form:
// #HEIGHT: 12345
func makeHeightSearchFunc(height int) auto.SearchFunc {
return func(line string) (int, error) {
line = strings.TrimRight(line, "\n")
parts := strings.Split(line, " ")
if len(parts) != 2 {
return -1, errors.New("Line did not have 2 parts")
}
i, err := strconv.Atoi(parts[1])
if err != nil {
return -1, errors.New("Failed to parse INFO: " + err.Error())
}
if height < i {
return 1, nil
} else if height == i {
return 0, nil
} else {
return -1, nil
}
}
}
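The comparator above returns -1/0/+1 so the autofile group reader can search marker lines for the target height. A small standalone check of those semantics (assuming only the comparator contract shown above):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// compareHeightLine mirrors makeHeightSearchFunc's contract:
// -1 while the marker's height is below the target (or the line is
// malformed), 0 on a match, +1 once the marker is past the target.
func compareHeightLine(target int, line string) (int, error) {
	parts := strings.Split(strings.TrimRight(line, "\n"), " ")
	if len(parts) != 2 {
		return -1, errors.New("line did not have 2 parts")
	}
	h, err := strconv.Atoi(parts[1])
	if err != nil {
		return -1, err
	}
	switch {
	case target < h:
		return 1, nil
	case target == h:
		return 0, nil
	default:
		return -1, nil
	}
}

func main() {
	for _, l := range []string{"#HEIGHT: 11", "#HEIGHT: 12", "#HEIGHT: 13"} {
		cmp, _ := compareHeightLine(12, l)
		fmt.Println(l, "->", cmp) // -1, 0, 1
	}
}
```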

@@ -4,137 +4,173 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"time"

"github.com/tendermint/tendermint/config/tendermint_test"

. "github.com/tendermint/go-common"
"github.com/tendermint/go-events"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)

/*
The easiest way to generate this data is to copy ~/.tendermint_test/somedir/* to ~/.tendermint
and to run a local node.
Be sure to set the db to "leveldb" to create a cswal file in ~/.tendermint/data/cswal.

If you need to change the signatures, you can use a script as follows:
The privBytes comes from config/tendermint_test/...

```
package main

import (
"encoding/hex"
"fmt"

"github.com/tendermint/go-crypto"
)

func main() {
signBytes, err := hex.DecodeString("7B22636861696E5F6964223A2274656E6465726D696E745F74657374222C22766F7465223A7B22626C6F636B5F68617368223A2242453544373939433846353044354645383533364334333932464443384537423342313830373638222C22626C6F636B5F70617274735F686561646572223A506172745365747B543A31204236323237323535464632307D2C22686569676874223A312C22726F756E64223A302C2274797065223A327D7D")
if err != nil {
panic(err)
}
privBytes, err := hex.DecodeString("27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8")
if err != nil {
panic(err)
}
privKey := crypto.PrivKeyEd25519{}
copy(privKey[:], privBytes)
signature := privKey.Sign(signBytes)
signatureEd25519 := signature.(crypto.SignatureEd25519)
fmt.Printf("Signature Bytes: %X\n", signatureEd25519[:])
}
```
*/

var testLog = `{"time":"2016-04-03T11:23:54.387Z","msg":[3,{"duration":972835254,"height":1,"round":0,"step":1}]}
{"time":"2016-04-03T11:23:54.388Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-04-03T11:23:54.388Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"pol_round":-1,"signature":"3A2ECD5023B21EC144EC16CFF1B992A4321317B83EEDD8969FDFEA6EB7BF4389F38DDA3E7BB109D63A07491C16277A197B241CF1F05F5E485C59882ECACD9E07"}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.389Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011441D59F4B718AC00000000000000114C4B01D3810579550997AC5641E759E20D99B51C10001000100","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.390Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-04-03T11:23:54.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":1,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"47D2A75A4E2F15DB1F0D1B656AC0637AF9AADDFEB6A156874F6553C73895E5D5DC948DBAEF15E61276C5342D0E638DFCB77C971CD282096EA8735A564A90F008"}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.392Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-04-03T11:23:54.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":2,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"39147DA595F08B73CF8C899967C8403B5872FD9042FFA4E239159E0B6C5D9665C9CA81D766EACA2AE658872F94C2FCD1E34BF51859CD5B274DA8512BACE4B50D"}}],"peer_key":""}]}
`

// map lines in the above wal to privVal step
var mapPrivValStep = map[int]int8{
0: 0,
1: 0,
2: 1,
3: 1,
4: 1,
5: 2,
6: 2,
7: 3,
func init() {
config = tendermint_test.ResetConfig("consensus_replay_test")
}

func writeWAL(log string) string {
fmt.Println("writing", log)
// write the needed wal to file
f, err := ioutil.TempFile(os.TempDir(), "replay_test_")
// TODO: these tests ensure we can always recover from any state of the wal,
// assuming it comes with a correct related state for the priv_validator.json.
// It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.

var data_dir = path.Join(GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data")

// the priv validator changes step at these lines for a block with 1 val and 1 part
var baseStepChanges = []int{3, 6, 8}

// test recovery from each line in each testCase
var testCases = []*testCase{
newTestCase("empty_block", baseStepChanges),   // empty block (has 1 block part)
newTestCase("small_block1", baseStepChanges),  // small block with txs in 1 block part
newTestCase("small_block2", []int{3, 10, 12}), // small block with txs across 5 smaller block parts
}

type testCase struct {
name string
log string // full cs wal
stepMap map[int]int8 // map lines of log to privval step

proposeLine int
prevoteLine int
precommitLine int
}

func newTestCase(name string, stepChanges []int) *testCase {
if len(stepChanges) != 3 {
panic(Fmt("a full wal has 3 step changes! Got array %v", stepChanges))
}
return &testCase{
name: name,
log: readWAL(path.Join(data_dir, name+".cswal")),
stepMap: newMapFromChanges(stepChanges),

proposeLine: stepChanges[0],
prevoteLine: stepChanges[1],
precommitLine: stepChanges[2],
}
}

func newMapFromChanges(changes []int) map[int]int8 {
changes = append(changes, changes[2]+1) // so we add the last step change to the map
m := make(map[int]int8)
var count int
for changeNum, nextChange := range changes {
for ; count < nextChange; count++ {
m[count] = int8(changeNum)
}
}
return m
}
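newMapFromChanges expands the three step-change line numbers into a line-to-step lookup. A quick worked check for baseStepChanges = []int{3, 6, 8}, assuming the function exactly as written above:

```go
package main

import "fmt"

// Same body as newMapFromChanges above, reproduced for a standalone check.
func newMapFromChanges(changes []int) map[int]int8 {
	changes = append(changes, changes[2]+1) // include the last step change
	m := make(map[int]int8)
	var count int
	for changeNum, nextChange := range changes {
		for ; count < nextChange; count++ {
			m[count] = int8(changeNum)
		}
	}
	return m
}

func main() {
	m := newMapFromChanges([]int{3, 6, 8})
	for line := 0; line < 9; line++ {
		fmt.Printf("line %d -> step %d\n", line, m[line])
	}
	// lines 0-2 -> step 0, lines 3-5 -> step 1,
	// lines 6-7 -> step 2, line 8 -> step 3
}
```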

func readWAL(p string) string {
b, err := ioutil.ReadFile(p)
if err != nil {
panic(err)
}
return string(b)
}

_, err = f.WriteString(log)
func writeWAL(walMsgs string) string {
tempDir := os.TempDir()
walDir := tempDir + "/wal" + RandStr(12)
// Create WAL directory
err := EnsureDir(walDir, 0700)
if err != nil {
panic(err)
}
name := f.Name()
f.Close()
return name
// Write the needed WAL to file
err = WriteFile(walDir+"/wal", []byte(walMsgs), 0600)
if err != nil {
panic(err)
}
return walDir
}

func waitForBlock(newBlockCh chan interface{}) {
func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
after := time.After(time.Second * 10)
select {
case <-newBlockCh:
case <-after:
panic("Timed out waiting for new block")
panic(Fmt("Timed out waiting for new block for case '%s' line %d", thisCase.name, i))
}
}

func runReplayTest(t *testing.T, cs *ConsensusState, fileName string, newBlockCh chan interface{}) {
cs.config.Set("cswal", fileName)
func runReplayTest(t *testing.T, cs *ConsensusState, walDir string, newBlockCh chan interface{},
thisCase *testCase, i int) {

cs.config.Set("cs_wal_dir", walDir)
cs.Start()
// Wait to make a new block.
// This is just a signal that we haven't halted; it's not something contained in the WAL itself.
// Assuming the consensus state is running, replay of any WAL, including the empty one,
// should eventually be followed by a new block, or else something is wrong
waitForBlock(newBlockCh)
waitForBlock(newBlockCh, thisCase, i)
cs.evsw.Stop()
cs.Stop()
LOOP:
for {
select {
case <-newBlockCh:
default:
break LOOP
}
}
cs.Wait()
}

func setupReplayTest(nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
func toPV(pv PrivValidator) *types.PrivValidator {
return pv.(*types.PrivValidator)
}

func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
fmt.Println("-------------------------------------")
log.Notice(Fmt("Starting replay test of %d lines of WAL (crash before write)", nLines))
log.Notice(Fmt("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter))

lineStep := nLines
if crashAfter {
lineStep -= 1
}

split := strings.Split(testLog, "\n")
split := strings.Split(thisCase.log, "\n")
lastMsg := split[nLines]

// we write those lines up to (not including) one with the signature
fileName := writeWAL(strings.Join(split[:nLines], "\n") + "\n")
walDir := writeWAL(strings.Join(split[:nLines], "\n") + "\n")

cs := fixedConsensusState()
cs := fixedConsensusStateDummy()

// set the last step according to when we crashed vs the wal
cs.privValidator.LastHeight = 1 // first block
cs.privValidator.LastStep = mapPrivValStep[lineStep]
toPV(cs.privValidator).LastHeight = 1 // first block
toPV(cs.privValidator).LastStep = thisCase.stepMap[lineStep]

fmt.Println("LAST STEP", cs.privValidator.LastStep)
log.Warn("setupReplayTest", "LastStep", toPV(cs.privValidator).LastStep)

newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)

return cs, newBlockCh, lastMsg, fileName
return cs, newBlockCh, lastMsg, walDir
}

func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage {
var err error
var msg TimedWALMessage
wire.ReadJSON(&msg, []byte(walMsg), &err)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
return msg
}

//-----------------------------------------------
@@ -142,10 +178,12 @@ func setupReplayTest(nLines int, crashAfter bool) (*ConsensusState, chan interfa
// as if the log was written after signing, before the crash

func TestReplayCrashAfterWrite(t *testing.T) {
split := strings.Split(testLog, "\n")
for i := 0; i < len(split)-1; i++ {
cs, newBlockCh, _, f := setupReplayTest(i+1, true)
runReplayTest(t, cs, f, newBlockCh)
for _, thisCase := range testCases {
split := strings.Split(thisCase.log, "\n")
for i := 0; i < len(split)-1; i++ {
cs, newBlockCh, _, walDir := setupReplayTest(thisCase, i+1, true)
runReplayTest(t, cs, walDir, newBlockCh, thisCase, i+1)
}
}
}

@@ -154,50 +192,40 @@ func TestReplayCrashAfterWrite(t *testing.T) {
// This relies on privValidator.LastSignature being set

func TestReplayCrashBeforeWritePropose(t *testing.T) {
cs, newBlockCh, proposalMsg, f := setupReplayTest(2, false) // propose
// Set LastSig
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, []byte(proposalMsg), &err)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
for _, thisCase := range testCases {
lineNum := thisCase.proposeLine
// setup replay test where last message is a proposal
cs, newBlockCh, proposalMsg, walDir := setupReplayTest(thisCase, lineNum, false)
msg := readTimedWALMessage(t, proposalMsg)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, walDir, newBlockCh, thisCase, lineNum)
}
cs.privValidator.LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
cs.privValidator.LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, f, newBlockCh)
}

func TestReplayCrashBeforeWritePrevote(t *testing.T) {
cs, newBlockCh, voteMsg, f := setupReplayTest(5, false) // prevote
cs.evsw.AddListenerForEvent("tester", types.EventStringCompleteProposal(), func(data events.EventData) {
// Set LastSig
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, []byte(voteMsg), &err)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
cs.privValidator.LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
cs.privValidator.LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, f, newBlockCh)
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.prevoteLine, types.EventStringCompleteProposal())
}
}

func TestReplayCrashBeforeWritePrecommit(t *testing.T) {
cs, newBlockCh, voteMsg, f := setupReplayTest(7, false) // precommit
cs.evsw.AddListenerForEvent("tester", types.EventStringPolka(), func(data events.EventData) {
// Set LastSig
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, []byte(voteMsg), &err)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
cs.privValidator.LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
cs.privValidator.LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, f, newBlockCh)
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.precommitLine, types.EventStringPolka())
}
}

func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) {
// setup replay test where last message is a vote
cs, newBlockCh, voteMsg, walDir := setupReplayTest(thisCase, lineNum, false)
types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) {
msg := readTimedWALMessage(t, voteMsg)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
toPV(cs.privValidator).LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, walDir, newBlockCh, thisCase, lineNum)
}
@@ -4,13 +4,15 @@ import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"sync"
"time"

"github.com/ebuchman/fail-test"

. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-events"
"github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
mempl "github.com/tendermint/tendermint/mempool"
@@ -22,15 +24,17 @@ import (
//-----------------------------------------------------------------------------
// Timeout Parameters

// All in milliseconds
// TimeoutParams holds timeouts and deltas for each round step.
// All timeouts and deltas in milliseconds.
type TimeoutParams struct {
Propose0 int
ProposeDelta int
Prevote0 int
PrevoteDelta int
Precommit0 int
PrecommitDelta int
Commit0 int
Propose0 int
ProposeDelta int
Prevote0 int
PrevoteDelta int
Precommit0 int
PrecommitDelta int
Commit0 int
SkipTimeoutCommit bool
}

// Wait this long for a proposal
@@ -53,16 +57,17 @@ func (tp *TimeoutParams) Commit(t time.Time) time.Time {
return t.Add(time.Duration(tp.Commit0) * time.Millisecond)
}

// Initialize parameters from config
// InitTimeoutParamsFromConfig initializes parameters from config
func InitTimeoutParamsFromConfig(config cfg.Config) *TimeoutParams {
return &TimeoutParams{
Propose0: config.GetInt("timeout_propose"),
ProposeDelta: config.GetInt("timeout_propose_delta"),
Prevote0: config.GetInt("timeout_prevote"),
PrevoteDelta: config.GetInt("timeout_prevote_delta"),
Precommit0: config.GetInt("timeout_precommit"),
PrecommitDelta: config.GetInt("timeout_precommit_delta"),
Commit0: config.GetInt("timeout_commit"),
Propose0: config.GetInt("timeout_propose"),
ProposeDelta: config.GetInt("timeout_propose_delta"),
Prevote0: config.GetInt("timeout_prevote"),
PrevoteDelta: config.GetInt("timeout_prevote_delta"),
Precommit0: config.GetInt("timeout_precommit"),
PrecommitDelta: config.GetInt("timeout_precommit_delta"),
Commit0: config.GetInt("timeout_commit"),
SkipTimeoutCommit: config.GetBool("skip_timeout_commit"),
}
}
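The base/delta pairing suggests the per-step helpers grow each timeout linearly with the round number, while Commit above is a flat offset. The helper bodies are elided from this diff, so the following is only a sketch of the escalation pattern matching the Propose0/ProposeDelta naming, not the file's actual code:

```go
package main

import (
	"fmt"
	"time"
)

type timeoutParams struct {
	Propose0     int // ms for round 0
	ProposeDelta int // extra ms per round
}

// propose grows linearly so later rounds give slow proposers more time.
// Hypothetical mirror of a TimeoutParams helper; the real body is not
// shown in this diff.
func (tp *timeoutParams) propose(round int) time.Duration {
	return time.Duration(tp.Propose0+tp.ProposeDelta*round) * time.Millisecond
}

func main() {
	tp := &timeoutParams{Propose0: 3000, ProposeDelta: 500}
	for round := 0; round < 3; round++ {
		fmt.Println("round", round, "->", tp.propose(round)) // 3s, 3.5s, 4s
	}
}
```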

@@ -188,8 +193,7 @@ func (rs *RoundState) StringShort() string {
//-----------------------------------------------------------------------------

var (
msgQueueSize = 1000
tickTockBufferSize = 10
msgQueueSize = 1000
)

// msgs from the reactor which may update the state
@@ -210,33 +214,45 @@ func (ti *timeoutInfo) String() string {
return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step)
}

type PrivValidator interface {
GetAddress() []byte
SignVote(chainID string, vote *types.Vote) error
SignProposal(chainID string, proposal *types.Proposal) error
}
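Narrowing privValidator to this small interface is what lets tests swap in a stub signer instead of a concrete *types.PrivValidator. A hypothetical minimal test double, written against toy local types so it stands alone (all names here are illustrative, not from the diff):

```go
package main

import "fmt"

// Toy mirrors of the consensus types, just enough to show how a
// test double satisfies the PrivValidator interface above.
type vote struct{ Signature [64]byte }
type proposal struct{ Signature [64]byte }

type privValidator interface {
	GetAddress() []byte
	SignVote(chainID string, v *vote) error
	SignProposal(chainID string, p *proposal) error
}

// mockPrivValidator "signs" with a zero signature and never refuses.
// A real implementation would also track LastHeight/LastStep to avoid
// double-signing, which is what the replay tests above exercise.
type mockPrivValidator struct{ addr []byte }

func (m *mockPrivValidator) GetAddress() []byte { return m.addr }

func (m *mockPrivValidator) SignVote(chainID string, v *vote) error {
	v.Signature = [64]byte{} // placeholder, not a real signature
	return nil
}

func (m *mockPrivValidator) SignProposal(chainID string, p *proposal) error {
	p.Signature = [64]byte{} // placeholder
	return nil
}

func main() {
	var pv privValidator = &mockPrivValidator{addr: []byte{0x01}}
	fmt.Printf("%X\n", pv.GetAddress())
}
```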

// Tracks consensus state across block heights and rounds.
type ConsensusState struct {
QuitService
BaseService

config cfg.Config
proxyAppConn proxy.AppConnConsensus
blockStore *bc.BlockStore
mempool *mempl.Mempool
privValidator *types.PrivValidator
config cfg.Config
proxyAppConn proxy.AppConnConsensus
blockStore *bc.BlockStore
mempool *mempl.Mempool

privValidator PrivValidator // for signing votes

mtx sync.Mutex
RoundState
state *sm.State // State until height-1.

peerMsgQueue chan msgInfo // serializes msgs affecting state (proposals, block parts, votes)
internalMsgQueue chan msgInfo // like peerMsgQueue but for our own proposals, parts, votes
timeoutTicker *time.Ticker // ticker for timeouts
tickChan chan timeoutInfo // start the timeoutTicker in the timeoutRoutine
tockChan chan timeoutInfo // timeouts are relayed on tockChan to the receiveRoutine
timeoutParams *TimeoutParams // parameters and functions for timeout intervals
peerMsgQueue chan msgInfo // serializes msgs affecting state (proposals, block parts, votes)
internalMsgQueue chan msgInfo // like peerMsgQueue but for our own proposals, parts, votes
timeoutTicker TimeoutTicker // ticker for timeouts
timeoutParams *TimeoutParams // parameters and functions for timeout intervals

evsw *events.EventSwitch
evsw types.EventSwitch

wal *WAL
replayMode bool // so we don't log signing errors during replay

nSteps int // used for testing to limit the number of transitions the state makes

// allow certain function to be overwritten for testing
decideProposal func(height, round int)
doPrevote func(height, round int)
setProposal func(proposal *types.Proposal) error

done chan struct{}
}

func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore *bc.BlockStore, mempool *mempl.Mempool) *ConsensusState {
@@ -247,16 +263,20 @@ func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.Ap
mempool: mempool,
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
timeoutTicker: new(time.Ticker),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
timeoutTicker: NewTimeoutTicker(),
timeoutParams: InitTimeoutParamsFromConfig(config),
done: make(chan struct{}),
}
// set function defaults (may be overwritten before calling Start)
cs.decideProposal = cs.defaultDecideProposal
cs.doPrevote = cs.defaultDoPrevote
cs.setProposal = cs.defaultSetProposal

cs.updateToState(state)
// Don't call scheduleRound0 yet.
// We do that upon Start().
cs.reconstructLastCommit(state)
cs.QuitService = *NewQuitService(log, "ConsensusState", cs)
cs.BaseService = *NewBaseService(log, "ConsensusState", cs)
return cs
}

@@ -264,7 +284,7 @@ func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.Ap
// Public interface

// implements events.Eventable
func (cs *ConsensusState) SetEventSwitch(evsw *events.EventSwitch) {
func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) {
cs.evsw = evsw
}

@@ -290,26 +310,73 @@ func (cs *ConsensusState) getRoundState() *RoundState {
return &rs
}

func (cs *ConsensusState) SetPrivValidator(priv *types.PrivValidator) {
func (cs *ConsensusState) GetValidators() (int, []*types.Validator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
}

// Sets our private validator account for signing votes.
func (cs *ConsensusState) SetPrivValidator(priv PrivValidator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.privValidator = priv
}

func (cs *ConsensusState) OnStart() error {
cs.QuitService.OnStart()
// Set the local timer
func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.timeoutTicker = timeoutTicker
}

err := cs.OpenWAL(cs.config.GetString("cswal"))
func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
cs.mtx.Lock()
defer cs.mtx.Unlock()
if height == cs.blockStore.Height() {
return cs.blockStore.LoadSeenCommit(height)
}
return cs.blockStore.LoadBlockCommit(height)
}

func (cs *ConsensusState) OnStart() error {
cs.BaseService.OnStart()

walDir := cs.config.GetString("cs_wal_dir")
err := EnsureDir(walDir, 0700)
if err != nil {
log.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
return err
}
err = cs.OpenWAL(walDir)
if err != nil {
log.Error("Error loading ConsensusState wal", "error", err.Error())
return err
}

// If the latest block was applied in the abci handshake,
// we may not have written the current height to the wal,
// so check here and write it if not found.
// TODO: remove this and run the handshake/replay
// through the consensus state with a mock app
gr, found, err := cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(cs.Height))
if (err == io.EOF || !found) && cs.Step == RoundStepNewHeight {
log.Warn("Height not found in wal. Writing new height", "height", cs.Height)
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
} else if err != nil {
return err
}
if gr != nil {
gr.Close()
}

// we need the timeoutRoutine for replay so
// we don't block on the tick chan.
// NOTE: we will get a build up of garbage go routines
// firing on the tockChan until the receiveRoutine is started
// to deal with them (by that point, at most one will be valid)
go cs.timeoutRoutine()
cs.timeoutTicker.Start()

// we may have lost some votes if the process crashed
// reload from consensus log to catchup
@@ -331,23 +398,32 @@ func (cs *ConsensusState) OnStart() error {
// timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
func (cs *ConsensusState) startRoutines(maxSteps int) {
go cs.timeoutRoutine()
cs.timeoutTicker.Start()
go cs.receiveRoutine(maxSteps)
}

func (cs *ConsensusState) OnStop() {
cs.QuitService.OnStop()
cs.BaseService.OnStop()

cs.timeoutTicker.Stop()

// Make BaseService.Wait() wait until cs.wal.Wait()
if cs.wal != nil && cs.IsRunning() {
cs.wal.Wait()
}
}

// NOTE: be sure to Stop() the event switch and drain
// any event channels or this may deadlock
func (cs *ConsensusState) Wait() {
<-cs.done
}

// Open file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(file string) (err error) {
func (cs *ConsensusState) OpenWAL(walDir string) (err error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
wal, err := NewWAL(file, cs.config.GetBool("cswal_light"))
wal, err := NewWAL(walDir, cs.config.GetBool("cs_wal_light"))
if err != nil {
return err
}
@@ -361,15 +437,15 @@ func (cs *ConsensusState) OpenWAL(file string) (err error) {
// TODO: should these return anything or let callers just use events?

// May block on send if queue is full.
func (cs *ConsensusState) AddVote(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (cs *ConsensusState) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
if peerKey == "" {
cs.internalMsgQueue <- msgInfo{&VoteMessage{valIndex, vote}, ""}
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
} else {
cs.peerMsgQueue <- msgInfo{&VoteMessage{valIndex, vote}, peerKey}
cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerKey}
}

// TODO: wait for event?!
return false, nil, nil
return false, nil
}

// May block on send if queue is full.
@@ -424,17 +500,12 @@ func (cs *ConsensusState) updateRoundStep(round int, step RoundStepType) {
func (cs *ConsensusState) scheduleRound0(rs *RoundState) {
//log.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now())
if sleepDuration < time.Duration(0) {
sleepDuration = time.Duration(0)
}
cs.scheduleTimeout(sleepDuration, rs.Height, 0, RoundStepNewHeight)
}

// Attempt to schedule a timeout by sending timeoutInfo on the tickChan.
// The timeoutRoutine is always available to read from tickChan (it won't block).
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan)
func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height, round int, step RoundStepType) {
cs.tickChan <- timeoutInfo{duration, height, round, step}
cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step})
}

// send a msg into the receiveRoutine regarding our own proposal, block part, or vote
@@ -459,11 +530,11 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) {
}
seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
lastPrecommits := types.NewVoteSet(cs.config.GetString("chain_id"), state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
for idx, precommit := range seenCommit.Precommits {
for _, precommit := range seenCommit.Precommits {
if precommit == nil {
continue
}
added, _, err := lastPrecommits.AddByIndex(idx, precommit)
added, err := lastPrecommits.AddVote(precommit)
if !added || err != nil {
PanicCrisis(Fmt("Failed to reconstruct LastCommit: %v", err))
}
@@ -520,7 +591,6 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
} else {
cs.StartTime = cs.timeoutParams.Commit(cs.CommitTime)
}
cs.CommitTime = time.Time{}
cs.Validators = validators
cs.Proposal = nil
cs.ProposalBlock = nil
@@ -545,62 +615,13 @@ func (cs *ConsensusState) newStep() {
cs.nSteps += 1
// newStep is called by updateToStep in NewConsensusState before the evsw is set!
if cs.evsw != nil {
cs.evsw.FireEvent(types.EventStringNewRoundStep(), rs)
types.FireEventNewRoundStep(cs.evsw, rs)
}
}

//-----------------------------------------
// the main go routines

// the state machine sends on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (cs *ConsensusState) timeoutRoutine() {
log.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-cs.tickChan:
log.Debug("Received tick", "old_ti", ti, "new_ti", newti)

// ignore tickers for old height/round/step
if newti.Height < ti.Height {
continue
} else if newti.Height == ti.Height {
if newti.Round < ti.Round {
continue
} else if newti.Round == ti.Round {
if ti.Step > 0 && newti.Step <= ti.Step {
continue
}
}
}

ti = newti

// if the newti has duration == 0, we relay to the tockChan immediately (no timeout)
if ti.Duration == time.Duration(0) {
go func(t timeoutInfo) { cs.tockChan <- t }(ti)
continue
}

log.Debug("Scheduling timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
cs.timeoutTicker.Stop()
cs.timeoutTicker = time.NewTicker(ti.Duration)
case <-cs.timeoutTicker.C:
log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
cs.timeoutTicker.Stop()
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(t timeoutInfo) { cs.tockChan <- t }(ti)
case <-cs.Quit:
return
}
}
}
|
||||
|
||||
// a nice idea but probably more trouble than its worth
|
||||
func (cs *ConsensusState) stopTimer() {
|
||||
cs.timeoutTicker.Stop()
|
||||
@@ -632,29 +653,23 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
cs.wal.Save(mi)
|
||||
// handles proposals, block parts, votes
|
||||
cs.handleMsg(mi, rs)
|
||||
case ti := <-cs.tockChan:
|
||||
case ti := <-cs.timeoutTicker.Chan(): // tockChan:
|
||||
cs.wal.Save(ti)
|
||||
// if the timeout is relevant to the rs
|
||||
// go to the next step
|
||||
cs.handleTimeout(ti, rs)
|
||||
case <-cs.Quit:
|
||||
|
||||
// drain the internalMsgQueue in case we eg. signed a proposal but it didn't hit the wal
|
||||
FLUSH:
|
||||
for {
|
||||
select {
|
||||
case mi = <-cs.internalMsgQueue:
|
||||
cs.wal.Save(mi)
|
||||
cs.handleMsg(mi, rs)
|
||||
default:
|
||||
break FLUSH
|
||||
}
|
||||
}
|
||||
// NOTE: the internalMsgQueue may have signed messages from our
|
||||
// priv_val that haven't hit the WAL, but its ok because
|
||||
// priv_val tracks LastSig
|
||||
|
||||
// close wal now that we're done writing to it
|
||||
if cs.wal != nil {
|
||||
cs.wal.Close()
|
||||
cs.wal.Stop()
|
||||
}
|
||||
|
||||
close(cs.done)
|
||||
return
|
||||
}
|
||||
}
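Worth noting in this hunk: every input is written to the WAL before it is handled, which is what makes crash recovery a deterministic replay. A minimal sketch of that invariant (the anonymous `Save` interface here is a stand-in, not this repo's WAL type):

```go
// Persist first, apply second: replaying the WAL after a crash re-applies
// inputs in exactly the order the live node saw them.
func saveThenHandle(wal interface{ Save(interface{}) }, mi msgInfo, handle func(msgInfo)) {
	wal.Save(mi) // 1. durable record of the input
	handle(mi)   // 2. state transition; replay repeats the same order
}
```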
@@ -681,7 +696,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo, rs RoundState) {
	case *VoteMessage:
		// attempt to add the vote and dupeout the validator if it's a duplicate signature
		// if the vote gives us a 2/3-any or 2/3-one, we transition
		err := cs.tryAddVote(msg.ValidatorIndex, msg.Vote, peerKey)
		err := cs.tryAddVote(msg.Vote, peerKey)
		if err == ErrAddingVote {
			// TODO: punish peer
		}
@@ -716,16 +731,16 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
	switch ti.Step {
	case RoundStepNewHeight:
		// NewRound event fired from enterNewRound.
		// XXX: should we fire timeout here?
		// XXX: should we fire timeout here (for timeout commit)?
		cs.enterNewRound(ti.Height, 0)
	case RoundStepPropose:
		cs.evsw.FireEvent(types.EventStringTimeoutPropose(), cs.RoundStateEvent())
		types.FireEventTimeoutPropose(cs.evsw, cs.RoundStateEvent())
		cs.enterPrevote(ti.Height, ti.Round)
	case RoundStepPrevoteWait:
		cs.evsw.FireEvent(types.EventStringTimeoutWait(), cs.RoundStateEvent())
		types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent())
		cs.enterPrecommit(ti.Height, ti.Round)
	case RoundStepPrecommitWait:
		cs.evsw.FireEvent(types.EventStringTimeoutWait(), cs.RoundStateEvent())
		types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent())
		cs.enterNewRound(ti.Height, ti.Round+1)
	default:
		panic(Fmt("Invalid timeout step: %v", ti.Step))
@@ -777,7 +792,7 @@ func (cs *ConsensusState) enterNewRound(height int, round int) {
	}
	cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping

	cs.evsw.FireEvent(types.EventStringNewRound(), cs.RoundStateEvent())
	types.FireEventNewRound(cs.evsw, cs.RoundStateEvent())

	// Immediately go to enterPropose.
	cs.enterPropose(height, round)
@@ -812,16 +827,16 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
		return
	}

	if !bytes.Equal(cs.Validators.Proposer().Address, cs.privValidator.Address) {
	if !bytes.Equal(cs.Validators.Proposer().Address, cs.privValidator.GetAddress()) {
		log.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.Proposer().Address, "privValidator", cs.privValidator)
	} else {
		log.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.Proposer().Address, "privValidator", cs.privValidator)
		cs.decideProposal(height, round)
	}

	}
}

func (cs *ConsensusState) decideProposal(height, round int) {
func (cs *ConsensusState) defaultDecideProposal(height, round int) {
	var block *types.Block
	var blockParts *types.PartSet

@@ -838,7 +853,8 @@ func (cs *ConsensusState) decideProposal(height, round int) {
	}

	// Make proposal
	proposal := types.NewProposal(height, round, blockParts.Header(), cs.Votes.POLRound())
	polRound, polBlockID := cs.Votes.POLInfo()
	proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
	err := cs.privValidator.SignProposal(cs.state.ChainID, proposal)
	if err == nil {
		// Set fields
@@ -861,7 +877,6 @@ func (cs *ConsensusState) decideProposal(height, round int) {
		log.Warn("enterPropose: Error signing proposal", "height", height, "round", round, "error", err)
	}
}

}
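`POLInfo` is new relative to `POLRound`: the proposal now carries the block the polka was for, not just the round. A sketch of what such an accessor could look like (the `round` field and the scan are assumptions; only the name and the `(round, BlockID)` return shape come from the diff):

```go
// Sketch: walk rounds from the latest down and return the most recent polka.
// -1 / empty BlockID means "no POL seen", matching the -1 passed in the tests.
func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) {
	for r := hvs.round; r >= 0; r-- {
		if blockID, ok := hvs.Prevotes(r).TwoThirdsMajority(); ok {
			return r, blockID
		}
	}
	return -1, types.BlockID{}
}
```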

// Returns true if the proposal block is complete &&
@@ -901,26 +916,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
	// Mempool validated transactions
	txs := cs.mempool.Reap(cs.config.GetInt("block_size"))

	block = &types.Block{
		Header: &types.Header{
			ChainID:        cs.state.ChainID,
			Height:         cs.Height,
			Time:           time.Now(),
			NumTxs:         len(txs),
			LastBlockHash:  cs.state.LastBlockHash,
			LastBlockParts: cs.state.LastBlockParts,
			ValidatorsHash: cs.state.Validators.Hash(),
			AppHash:        cs.state.AppHash, // state merkle root of txs from the previous block.
		},
		LastCommit: commit,
		Data: &types.Data{
			Txs: txs,
		},
	}
	block.FillHeader()
	blockParts = block.MakePartSet()

	return block, blockParts
	return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
		cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.GetInt("block_part_size"))
}
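The hand-rolled block construction collapses into `types.MakeBlock`, which fills the header and cuts the part set in one call. Note it takes `cs.state.LastBlockID` where the old code used separate `LastBlockHash`/`LastBlockParts` fields — the same hash-plus-parts-header consolidation applied throughout this change. The call shape, as used above:

```go
// As in the diff: MakeBlock returns both the block and its part set,
// with the part size now read from config instead of being fixed.
block, blockParts := types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
	cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash,
	cs.config.GetInt("block_part_size"))
```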

// Enter: `timeoutPropose` after entering Propose.
@@ -942,7 +939,7 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {

	// fire event for how we got here
	if cs.isProposalComplete() {
		cs.evsw.FireEvent(types.EventStringCompleteProposal(), cs.RoundStateEvent())
		types.FireEventCompleteProposal(cs.evsw, cs.RoundStateEvent())
	} else {
		// we received +2/3 prevotes for a future round
		// TODO: catchup event?
@@ -959,10 +956,10 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
	// (so we have more time to try and collect +2/3 prevotes for a single block)
}

func (cs *ConsensusState) doPrevote(height int, round int) {
func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
	// If a block is locked, prevote that.
	if cs.LockedBlock != nil {
		log.Info("enterPrevote: Block was locked")
		log.Notice("enterPrevote: Block was locked")
		cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
		return
	}
@@ -1033,29 +1030,30 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
		cs.newStep()
	}()

	hash, partsHeader, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()
	blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()

	// If we don't have a polka, we must precommit nil
	if !ok {
		if cs.LockedBlock != nil {
			log.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
			log.Notice("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
		} else {
			log.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
			log.Notice("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
		}
		cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
		return
	}

	// At this point +2/3 prevoted for a particular block or nil
	cs.evsw.FireEvent(types.EventStringPolka(), cs.RoundStateEvent())
	types.FireEventPolka(cs.evsw, cs.RoundStateEvent())

	// the latest POLRound should be this round
	if cs.Votes.POLRound() < round {
		PanicSanity(Fmt("This POLRound should be %v but got %v", round, cs.Votes.POLRound()))
	polRound, _ := cs.Votes.POLInfo()
	if polRound < round {
		PanicSanity(Fmt("This POLRound should be %v but got %v", round, polRound))
	}

	// +2/3 prevoted nil. Unlock and precommit nil.
	if len(hash) == 0 {
	if len(blockID.Hash) == 0 {
		if cs.LockedBlock == nil {
			log.Notice("enterPrecommit: +2/3 prevoted for nil.")
		} else {
@@ -1063,7 +1061,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
			cs.LockedRound = 0
			cs.LockedBlock = nil
			cs.LockedBlockParts = nil
			cs.evsw.FireEvent(types.EventStringUnlock(), cs.RoundStateEvent())
			types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
		}
		cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
		return
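`hash, partsHeader` pairs give way to a single `blockID` here and below. The diff never shows the type, but its shape is implied by `types.BlockID{hash, header}` and the `.Hash`/`.PartsHeader` accesses; a sketch (the `IsNil` helper is hypothetical, added only to spell out the `len(blockID.Hash) == 0` convention used above):

```go
// Inferred shape of the consolidated block identifier.
type BlockID struct {
	Hash        []byte
	PartsHeader PartSetHeader
}

// Hypothetical helper: an empty Hash means "the nil block".
func (blockID BlockID) IsNil() bool { return len(blockID.Hash) == 0 }
```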
@@ -1072,17 +1070,17 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
	// At this point, +2/3 prevoted for a particular block.

	// If we're already locked on that block, precommit it, and update the LockedRound
	if cs.LockedBlock.HashesTo(hash) {
	if cs.LockedBlock.HashesTo(blockID.Hash) {
		log.Notice("enterPrecommit: +2/3 prevoted locked block. Relocking")
		cs.LockedRound = round
		cs.evsw.FireEvent(types.EventStringRelock(), cs.RoundStateEvent())
		cs.signAddVote(types.VoteTypePrecommit, hash, partsHeader)
		types.FireEventRelock(cs.evsw, cs.RoundStateEvent())
		cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
		return
	}

	// If +2/3 prevoted for proposal block, stage and precommit it
	if cs.ProposalBlock.HashesTo(hash) {
		log.Notice("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", hash)
	if cs.ProposalBlock.HashesTo(blockID.Hash) {
		log.Notice("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
		// Validate the block.
		if err := cs.state.ValidateBlock(cs.ProposalBlock); err != nil {
			PanicConsensus(Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
@@ -1090,8 +1088,8 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
		cs.LockedRound = round
		cs.LockedBlock = cs.ProposalBlock
		cs.LockedBlockParts = cs.ProposalBlockParts
		cs.evsw.FireEvent(types.EventStringLock(), cs.RoundStateEvent())
		cs.signAddVote(types.VoteTypePrecommit, hash, partsHeader)
		types.FireEventLock(cs.evsw, cs.RoundStateEvent())
		cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
		return
	}

@@ -1102,11 +1100,11 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
	cs.LockedRound = 0
	cs.LockedBlock = nil
	cs.LockedBlockParts = nil
	if !cs.ProposalBlockParts.HasHeader(partsHeader) {
	if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
		cs.ProposalBlock = nil
		cs.ProposalBlockParts = types.NewPartSetFromHeader(partsHeader)
		cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
	}
	cs.evsw.FireEvent(types.EventStringUnlock(), cs.RoundStateEvent())
	types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
	cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
	return
}
@@ -1146,13 +1144,14 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
	// keep cs.Round the same, commitRound points to the right Precommits set.
	cs.updateRoundStep(cs.Round, RoundStepCommit)
	cs.CommitRound = commitRound
	cs.CommitTime = time.Now()
	cs.newStep()

	// Maybe finalize immediately.
	cs.tryFinalizeCommit(height)
	}()

	hash, partsHeader, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
	blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
	if !ok {
		PanicSanity("RunActionCommit() expects +2/3 precommits")
	}
@@ -1160,18 +1159,18 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
	// The Locked* fields no longer matter.
	// Move them over to ProposalBlock if they match the commit hash,
	// otherwise they'll be cleared in updateToState.
	if cs.LockedBlock.HashesTo(hash) {
	if cs.LockedBlock.HashesTo(blockID.Hash) {
		cs.ProposalBlock = cs.LockedBlock
		cs.ProposalBlockParts = cs.LockedBlockParts
	}

	// If we don't have the block being committed, set up to get it.
	if !cs.ProposalBlock.HashesTo(hash) {
		if !cs.ProposalBlockParts.HasHeader(partsHeader) {
	if !cs.ProposalBlock.HashesTo(blockID.Hash) {
		if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
			// We're getting the wrong block.
			// Set up ProposalBlockParts and keep waiting.
			cs.ProposalBlock = nil
			cs.ProposalBlockParts = types.NewPartSetFromHeader(partsHeader)
			cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
		} else {
			// We just need to keep waiting.
		}
@@ -1184,13 +1183,14 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) {
		PanicSanity(Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
	}

	hash, _, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
	if !ok || len(hash) == 0 {
	blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
	if !ok || len(blockID.Hash) == 0 {
		log.Warn("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
		return
	}
	if !cs.ProposalBlock.HashesTo(hash) {
	if !cs.ProposalBlock.HashesTo(blockID.Hash) {
		// TODO: this happens every time if we're not a validator (ugly logs)
		// TODO: ^^ wait, why does it matter that we're a validator?
		log.Warn("Attempt to finalize failed. We don't have the commit block.")
		return
	}
@@ -1205,66 +1205,67 @@ func (cs *ConsensusState) finalizeCommit(height int) {
		return
	}

	hash, header, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
	blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
	block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts

	if !ok {
		PanicSanity(Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
	}
	if !blockParts.HasHeader(header) {
	if !blockParts.HasHeader(blockID.PartsHeader) {
		PanicSanity(Fmt("Expected ProposalBlockParts header to be commit header"))
	}
	if !block.HashesTo(hash) {
	if !block.HashesTo(blockID.Hash) {
		PanicSanity(Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
	}
	if err := cs.state.ValidateBlock(block); err != nil {
		PanicConsensus(Fmt("+2/3 committed an invalid block: %v", err))
	}

	log.Notice(Fmt("Finalizing commit of block with %d txs", block.NumTxs), "height", block.Height, "hash", block.Hash())
	log.Notice(Fmt("Finalizing commit of block with %d txs", block.NumTxs),
		"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
	log.Info(Fmt("%v", block))

	// Fire off event for new block.
	// TODO: Handle app failure. See #177
	cs.evsw.FireEvent(types.EventStringNewBlock(), types.EventDataNewBlock{block})
	cs.evsw.FireEvent(types.EventStringNewBlockHeader(), types.EventDataNewBlockHeader{block.Header})

	// Create a copy of the state for staging
	stateCopy := cs.state.Copy()

	// event cache for txs
	eventCache := events.NewEventCache(cs.evsw)

	// Run the block on the State:
	// + update validator sets
	// + run txs on the proxyAppConn
	err := stateCopy.ExecBlock(eventCache, cs.proxyAppConn, block, blockParts.Header())
	if err != nil {
		// TODO: handle this gracefully.
		PanicQ(Fmt("Exec failed for application: %v", err))
	}

	// lock mempool, commit state, update mempool
	err = cs.commitStateUpdateMempool(stateCopy, block)
	if err != nil {
		// TODO: handle this gracefully.
		PanicQ(Fmt("Commit failed for application: %v", err))
	}

	// txs committed, bad ones removed from mempool; fire events
	// NOTE: the block.AppHash won't reflect these txs until the next block
	eventCache.Flush()
	fail.Fail() // XXX

	// Save to blockStore.
	if cs.blockStore.Height() < block.Height {
		// NOTE: the seenCommit is local justification to commit this block,
		// but may differ from the LastCommit included in the next block
		precommits := cs.Votes.Precommits(cs.CommitRound)
		seenCommit := precommits.MakeCommit()
		cs.blockStore.SaveBlock(block, blockParts, seenCommit)
	} else {
		log.Warn("Why are we finalizeCommitting a block height we already have?", "height", block.Height)
	}

	fail.Fail() // XXX

	// Create a copy of the state for staging
	// and an event cache for txs
	stateCopy := cs.state.Copy()
	eventCache := types.NewEventCache(cs.evsw)

	// Execute and commit the block, and update the mempool.
	// All calls to the proxyAppConn should come here.
	// NOTE: the block.AppHash won't reflect these txs until the next block
	err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
	if err != nil {
		// TODO!
	}

	fail.Fail() // XXX

	// Fire off event for new block.
	// TODO: Handle app failure. See #177
	types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block})
	types.FireEventNewBlockHeader(cs.evsw, types.EventDataNewBlockHeader{block.Header})
	eventCache.Flush()

	// Save the state.
	stateCopy.Save()

	fail.Fail() // XXX

	// NewHeightStep!
	cs.updateToState(stateCopy)

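The `fail.Fail() // XXX` markers bracket each stage of the rewritten commit pipeline (save block, apply block, fire events, save state), so a test can kill the process between any two stages and assert that recovery finishes the commit. A self-contained sketch of how such a fail point can work; this is a hypothetical stand-in, not the actual `fail` package:

```go
// Hypothetical fail-point: crash at the N-th checkpoint, selected by an
// environment variable, so a harness can exercise every crash window.
package fail

import (
	"os"
	"strconv"
)

var callIndex int

func Fail() {
	target, err := strconv.Atoi(os.Getenv("FAIL_INDEX"))
	if err != nil {
		return // fail points disabled when the variable is unset
	}
	if callIndex == target {
		os.Exit(1) // simulate a crash between two pipeline steps
	}
	callIndex++
}
```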
@@ -1279,36 +1280,11 @@ func (cs *ConsensusState) finalizeCommit(height int) {
	return
}

// mempool must be locked during commit and update
// because state is typically reset on Commit and old txs must be replayed
// against committed state before new txs are run in the mempool, lest they be invalid
func (cs *ConsensusState) commitStateUpdateMempool(s *sm.State, block *types.Block) error {
	cs.mempool.Lock()
	defer cs.mempool.Unlock()

	// Commit block, get hash back
	res := cs.proxyAppConn.CommitSync()
	if res.IsErr() {
		log.Warn("Error in proxyAppConn.CommitSync", "error", res)
		return res
	}
	if res.Log != "" {
		log.Debug("Commit.Log: " + res.Log)
	}

	// Set the state's new AppHash
	s.AppHash = res.Data

	// Update mempool.
	cs.mempool.Update(block.Height, block.Txs)

	return nil
}
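The removed comment states the contract that moves into `ApplyBlock` (which now receives `cs.mempool`): nothing may enter the mempool between the app's Commit and the mempool Update, or new txs would be checked against stale state. A sketch of that contract under a hypothetical minimal interface:

```go
// Sketch only: the real mempool has more methods; this isolates the
// lock-commit-update ordering that the deleted function enforced.
type Mempool interface {
	Lock()
	Unlock()
	Update(height int, txs []types.Tx)
}

func commitAndUpdate(mem Mempool, commitApp func() ([]byte, error), block *types.Block) error {
	mem.Lock()
	defer mem.Unlock()
	appHash, err := commitApp() // the app resets its check-tx state here
	if err != nil {
		return err
	}
	_ = appHash // becomes state.AppHash for the next height
	mem.Update(block.Height, block.Txs) // replay remaining txs against committed state
	return nil
}
```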

//-----------------------------------------------------------------------------

func (cs *ConsensusState) setProposal(proposal *types.Proposal) error {
func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
	// Already have one
	// TODO: possibly catch double proposals
	if cs.Proposal != nil {
		return nil
	}
@@ -1376,15 +1352,15 @@ func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, ver
}

// Attempt to add the vote. If it's a duplicate signature, dupeout the validator
func (cs *ConsensusState) tryAddVote(valIndex int, vote *types.Vote, peerKey string) error {
	_, _, err := cs.addVote(valIndex, vote, peerKey)
func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
	_, err := cs.addVote(vote, peerKey)
	if err != nil {
		// If the vote height is off, we'll just ignore it,
		// But if it's a conflicting sig, broadcast evidence tx for slashing.
		// If it's otherwise invalid, punish peer.
		if err == ErrVoteHeightMismatch {
			return err
		} else if _, ok := err.(*types.ErrVoteConflictingSignature); ok {
		} else if _, ok := err.(*types.ErrVoteConflictingVotes); ok {
			if peerKey == "" {
				log.Warn("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
				return err
@@ -1410,31 +1386,39 @@ func (cs *ConsensusState) tryAddVote(valIndex int, vote *types.Vote, peerKey str

//-----------------------------------------------------------------------------

func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, err error) {
	log.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "csHeight", cs.Height)

	// A precommit for the previous height?
	// These come in while we wait timeoutCommit
	if vote.Height+1 == cs.Height {
		if !(cs.Step == RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) {
			// TODO: give the reason ..
			// fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.")
			return added, nil, ErrVoteHeightMismatch
			return added, ErrVoteHeightMismatch
		}
		added, address, err = cs.LastCommit.AddByIndex(valIndex, vote)
		added, err = cs.LastCommit.AddVote(vote)
		if added {
			log.Info(Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
			cs.evsw.FireEvent(types.EventStringVote(), types.EventDataVote{valIndex, address, vote})
			types.FireEventVote(cs.evsw, types.EventDataVote{vote})

			// if we can skip timeoutCommit and have all the votes now,
			if cs.timeoutParams.SkipTimeoutCommit && cs.LastCommit.HasAll() {
				// go straight to new round (skip timeout commit)
				// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
				cs.enterNewRound(cs.Height, 0)
			}
		}

		return
	}

	// A prevote/precommit for this height?
	if vote.Height == cs.Height {
		height := cs.Height
		added, address, err = cs.Votes.AddByIndex(valIndex, vote, peerKey)
		added, err = cs.Votes.AddVote(vote, peerKey)
		if added {
			cs.evsw.FireEvent(types.EventStringVote(), types.EventDataVote{valIndex, address, vote})
			types.FireEventVote(cs.evsw, types.EventDataVote{vote})

			switch vote.Type {
			case types.VoteTypePrevote:
@@ -1446,13 +1430,13 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string
				// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
				// there.
				if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
					hash, _, ok := prevotes.TwoThirdsMajority()
					if ok && !cs.LockedBlock.HashesTo(hash) {
					blockID, ok := prevotes.TwoThirdsMajority()
					if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
						log.Notice("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
						cs.LockedRound = 0
						cs.LockedBlock = nil
						cs.LockedBlockParts = nil
						cs.evsw.FireEvent(types.EventStringUnlock(), cs.RoundStateEvent())
						types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
					}
				}
				if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
@@ -1473,20 +1457,27 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string
			case types.VoteTypePrecommit:
				precommits := cs.Votes.Precommits(vote.Round)
				log.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
				hash, _, ok := precommits.TwoThirdsMajority()
				blockID, ok := precommits.TwoThirdsMajority()
				if ok {
					if len(hash) == 0 {
					if len(blockID.Hash) == 0 {
						cs.enterNewRound(height, vote.Round+1)
					} else {
						cs.enterNewRound(height, vote.Round)
						cs.enterPrecommit(height, vote.Round)
						cs.enterCommit(height, vote.Round)

						if cs.timeoutParams.SkipTimeoutCommit && precommits.HasAll() {
							// if we have all the votes now,
							// go straight to new round (skip timeout commit)
							// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
							cs.enterNewRound(cs.Height, 0)
						}

					}
				} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
					cs.enterNewRound(height, vote.Round)
					cs.enterPrecommit(height, vote.Round)
					cs.enterPrecommitWait(height, vote.Round)
					//}()
				}
			default:
				PanicSanity(Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
@@ -1499,17 +1490,20 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string
	}

	// Height mismatch, bad peer?
	log.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height)
	log.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
	return
}

func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
	addr := cs.privValidator.GetAddress()
	valIndex, _ := cs.Validators.GetByAddress(addr)
	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   valIndex,
		Height:           cs.Height,
		Round:            cs.Round,
		Type:             type_,
		BlockHash:        hash,
		BlockPartsHeader: header,
		BlockID:          types.BlockID{hash, header},
	}
	err := cs.privValidator.SignVote(cs.state.ChainID, vote)
	return vote, err
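The vote payload mirrors the proposal change: `BlockHash` plus `BlockPartsHeader` collapse into one `BlockID`. Restated as a standalone constructor, assembled from the lines above rather than any new API:

```go
// Sketch: the vote now identifies both its signer and the block it is for.
func newVote(cs *ConsensusState, type_ byte, hash []byte, header types.PartSetHeader) *types.Vote {
	addr := cs.privValidator.GetAddress()
	valIndex, _ := cs.Validators.GetByAddress(addr)
	return &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   valIndex,
		Height:           cs.Height,
		Round:            cs.Round,
		Type:             type_,
		BlockID:          types.BlockID{Hash: hash, PartsHeader: header},
	}
}
```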
@@ -1517,21 +1511,19 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet

// sign the vote and publish on internalMsgQueue
func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote {

	if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.Address) {
	// if we don't have a key or we're not in the validator set, do nothing
	if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
		return nil
	}
	vote, err := cs.signVote(type_, hash, header)
	if err == nil {
		// TODO: store our index in the cs so we don't have to do this every time
		valIndex, _ := cs.Validators.GetByAddress(cs.privValidator.Address)
		cs.sendInternalMessage(msgInfo{&VoteMessage{valIndex, vote}, ""})
		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
		log.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
		return vote
	} else {
		if !cs.replayMode {
			log.Warn("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
		}
		//if !cs.replayMode {
		log.Warn("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
		//}
		return nil
	}
}

@@ -6,9 +6,8 @@ import (
	"testing"
	"time"

	"github.com/tendermint/tendermint/config/tendermint_test"
	//"github.com/tendermint/go-events"
	. "github.com/tendermint/go-common"
	"github.com/tendermint/tendermint/config/tendermint_test"
	"github.com/tendermint/tendermint/types"
)

@@ -31,7 +30,7 @@ x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevot
FullRoundSuite
x * TestFullRound1 - 1 val, full successful round
x * TestFullRoundNil - 1 val, full round of nil
x * TestFullRound2 - 2 vals, both required for fuill round
x * TestFullRound2 - 2 vals, both required for full round
LockSuite
x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
@@ -67,15 +66,15 @@ func TestProposerSelection0(t *testing.T) {

	// let's commit a block and ensure proposer for the next height is correct
	prop := cs1.GetRoundState().Validators.Proposer()
	if !bytes.Equal(prop.Address, cs1.privValidator.Address) {
		panic(Fmt("expected proposer to be validator %d. Got %X", 0, prop.Address))
	if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
		t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
	}

	// wait for complete proposal
	<-proposalCh

	rs := cs1.GetRoundState()
	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil, vss[1:]...)
	signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)

	// wait for new round so next validator is set
	<-newRoundCh
@@ -107,7 +106,7 @@ func TestProposerSelection2(t *testing.T) {
	}

	rs := cs1.GetRoundState()
	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, rs.ProposalBlockParts.Header(), nil, vss[1:]...)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
	<-newRoundCh // wait for the new round event each round

	incrementRound(vss[1:]...)
@@ -180,12 +179,14 @@ func TestEnterProposeYesPrivValidator(t *testing.T) {
func TestBadProposal(t *testing.T) {
	cs1, vss := randConsensusState(2)
	height, round := cs1.Height, cs1.Round
	cs2 := vss[1]
	vs2 := vss[1]

	partSize := config.GetInt("block_part_size")

	proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
	voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)

	propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, cs2)
	propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)

	// make the second validator the proposer by incrementing round
	round = round + 1
@@ -198,10 +199,10 @@ func TestBadProposal(t *testing.T) {
	}
	stateHash[0] = byte((stateHash[0] + 1) % 255)
	propBlock.AppHash = stateHash
	propBlockParts := propBlock.MakePartSet()
	proposal := types.NewProposal(cs2.Height, round, propBlockParts.Header(), -1)
	if err := cs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
		panic("failed to sign bad proposal: " + err.Error())
	propBlockParts := propBlock.MakePartSet(partSize)
	proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
	if err := vs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
@@ -218,14 +219,15 @@ func TestBadProposal(t *testing.T) {

	validatePrevote(t, cs1, round, vss[0], nil)

	// add bad prevote from cs2 and wait for it
	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
	// add bad prevote from vs2 and wait for it
	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// wait for precommit
	<-voteCh

	validatePrecommit(t, cs1, round, 0, vss[0], nil, nil)
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
}
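These test hunks replace `signAddVoteToFrom`/`signAddVoteToFromMany` with `signAddVotes`, and the explicit wait-channel argument disappears because tests now read `voteCh` themselves. The helper's shape, inferred from its call sites together with the `signVotes`/`addVotes` helpers that appear later in this diff (so treat the signature and the `validatorStub` type name as approximate):

```go
// Sketch: sign one vote per validator stub and feed them all to cs.
func signAddVotes(to *ConsensusState, voteType byte, hash []byte,
	header types.PartSetHeader, vss ...*validatorStub) {
	votes := signVotes(voteType, hash, header, vss...)
	addVotes(to, votes...)
}
```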

//----------------------------------------------------------------------------------------------------
@@ -282,7 +284,7 @@ func TestFullRoundNil(t *testing.T) {
// where the first validator has to wait for votes from the second
func TestFullRound2(t *testing.T) {
	cs1, vss := randConsensusState(2)
	cs2 := vss[1]
	vs2 := vss[1]
	height, round := cs1.Height, cs1.Round

	voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@@ -297,8 +299,9 @@ func TestFullRound2(t *testing.T) {
	rs := cs1.GetRoundState()
	propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()

	// prevote arrives from cs2:
	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlockHash, propPartsHeader, voteCh)
	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2)
	<-voteCh

	<-voteCh //precommit

@@ -307,8 +310,9 @@ func TestFullRound2(t *testing.T) {

	// we should be stuck in limbo waiting for more precommits

	// precommit arrives from cs2:
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlockHash, propPartsHeader, voteCh)
	// precommit arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2)
	<-voteCh

	// wait to finish commit, propose in next height
	<-newBlockCh
@@ -321,9 +325,11 @@ func TestFullRound2(t *testing.T) {
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else
func TestLockNoPOL(t *testing.T) {
	cs1, vss := randConsensusState(2)
	cs2 := vss[1]
	vs2 := vss[1]
	height := cs1.Height

	partSize := config.GetInt("block_part_size")

	timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
	voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@@ -345,8 +351,9 @@ func TestLockNoPOL(t *testing.T) {
	<-voteCh // prevote

	// we should now be stuck in limbo forever, waiting for more prevotes
	// prevote arrives from cs2:
	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), voteCh)
	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2)
	<-voteCh // prevote

	<-voteCh // precommit

@@ -359,7 +366,8 @@ func TestLockNoPOL(t *testing.T) {
	hash := make([]byte, len(theBlockHash))
	copy(hash, theBlockHash)
	hash[0] = byte((hash[0] + 1) % 255)
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh // precommit

	// (note we're entering precommit for a second time this round)
	// but with invalid args. then we enterPrecommitWait, and the timeout to new round
@@ -373,7 +381,7 @@ func TestLockNoPOL(t *testing.T) {
		Round2 (cs1, B) // B B2
	*/

	incrementRound(cs2)
	incrementRound(vs2)

	// now we're on a new round and not the proposer, so wait for timeout
	re = <-timeoutProposeCh
@@ -390,7 +398,8 @@ func TestLockNoPOL(t *testing.T) {
	validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())

	// add a conflicting prevote from the other validator
	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// now we're going to enter prevote again, but with invalid args
	// and then prevote wait, which should timeout. then wait for precommit
@@ -402,9 +411,10 @@ func TestLockNoPOL(t *testing.T) {
	// we should precommit nil and be locked on the proposal
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash)

	// add conflicting precommit from cs2
	// add conflicting precommit from vs2
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// (note we're entering precommit for a second time this round, but with invalid args
	// then we enterPrecommitWait and timeout into NewRound
@@ -413,10 +423,10 @@ func TestLockNoPOL(t *testing.T) {
	<-newRoundCh
	log.Notice("#### ONTO ROUND 2")
	/*
		Round3 (cs2, _) // B, B2
		Round3 (vs2, _) // B, B2
	*/

	incrementRound(cs2)
	incrementRound(vs2)

	re = <-proposalCh
	rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
@@ -430,33 +440,36 @@ func TestLockNoPOL(t *testing.T) {

	validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash())

	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	<-timeoutWaitCh // prevote wait
	<-voteCh        // precommit

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh) // NOTE: conflicting precommits at same height
	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal

	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	<-voteCh

	<-timeoutWaitCh

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	if prop == nil || propBlock == nil {
		panic("Failed to create proposal block with cs2")
		t.Fatal("Failed to create proposal block with vs2")
	}

	incrementRound(cs2)
	incrementRound(vs2)

	<-newRoundCh
	log.Notice("#### ONTO ROUND 3")
	/*
		Round4 (cs2, C) // B C // B C
		Round4 (vs2, C) // B C // B C
	*/

	// now we're on a new round and not the proposer
	// so set the proposal block
	cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(), "")
	cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "")

	<-proposalCh
	<-voteCh // prevote
@@ -464,19 +477,24 @@ func TestLockNoPOL(t *testing.T) {
	// prevote for locked block (not proposal)
	validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash())

	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	<-timeoutWaitCh
	<-voteCh

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh) // NOTE: conflicting precommits at same height
	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal

	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	<-voteCh
}
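`validatePrecommit` is called throughout with the pattern `(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash)`. A sketch of what it plausibly checks, deduced from the call sites (the `GetByAddress` accessor and the exact assertions are assumptions; it also presumes the test file's `bytes`/`testing` imports):

```go
// Sketch: nil votedBlockHash means "we precommitted nil this round";
// nil lockedBlockHash means "we hold no lock".
func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int,
	priv *validatorStub, votedBlockHash, lockedBlockHash []byte) {
	vote := cs.Votes.Precommits(thisRound).GetByAddress(priv.GetAddress()) // hypothetical accessor
	if vote == nil {
		t.Fatal("no precommit from us this round")
	}
	if votedBlockHash == nil {
		if len(vote.BlockID.Hash) != 0 {
			t.Fatalf("expected precommit for nil, got %X", vote.BlockID.Hash)
		}
	} else if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
		t.Fatalf("wrong precommit: got %X, want %X", vote.BlockID.Hash, votedBlockHash)
	}
	if lockedBlockHash == nil {
		if cs.LockedRound != lockRound || cs.LockedBlock != nil {
			t.Fatal("expected no lock")
		}
	} else if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) {
		t.Fatalf("wrong lock: round %d, block %v", cs.LockedRound, cs.LockedBlock)
	}
}
```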

// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestLockPOLRelock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	cs2, cs3, cs4 := vss[1], vss[2], vss[3]
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := config.GetInt("block_part_size")

	timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@@ -485,14 +503,14 @@ func TestLockPOLRelock(t *testing.T) {
	newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
	newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)

	log.Debug("cs2 last round", "lr", cs2.PrivValidator.LastRound)
	log.Debug("vs2 last round", "lr", vs2.PrivValidator.LastRound)

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B// B nil B nil

		e.g. cs2 and cs4 didn't see the 2/3 prevotes
		e.g. vs2 and vs4 didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
@@ -502,26 +520,27 @@ func TestLockPOLRelock(t *testing.T) {
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
	theBlockHash := rs.ProposalBlock.Hash()
	theBlockPartsHeader := rs.ProposalBlockParts.Header()

	<-voteCh // prevote

	signAddVoteToFromMany(types.VoteTypePrevote, cs1, theBlockHash, theBlockPartsHeader, voteCh, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
	_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes

	<-voteCh // our precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// add precommits from the rest
	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, voteCh, cs2, cs4)
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, theBlockHash, theBlockPartsHeader, voteCh)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
	_, _, _ = <-voteCh, <-voteCh, <-voteCh // precommits

	// before we timeout to the new round set the new proposal
	prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
	propBlockParts := propBlock.MakePartSet()
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)
	propBlockHash := propBlock.Hash()

	incrementRound(cs2, cs3, cs4)
	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	<-timeoutWaitCh
@@ -533,7 +552,7 @@ func TestLockPOLRelock(t *testing.T) {
	log.Notice("### ONTO ROUND 1")

	/*
		Round2 (cs2, C) // B C C C // C C C _)
		Round2 (vs2, C) // B C C C // C C C _)

		cs1 changes lock!
	*/
@@ -551,7 +570,8 @@ func TestLockPOLRelock(t *testing.T) {
	validatePrevote(t, cs1, 0, vss[0], theBlockHash)

	// now let's add prevotes from everyone else for the new block
	signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash, propBlockParts.Header(), voteCh, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
	_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes

	// now either we go to PrevoteWait or Precommit
	select {
@@ -565,7 +585,8 @@ func TestLockPOLRelock(t *testing.T) {
	// we should have unlocked and locked on the new block
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, propBlockHash, propBlockParts.Header(), voteCh, cs2, cs3)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
	_, _ = <-voteCh, <-voteCh

	be := <-newBlockCh
	b := be.(types.EventDataNewBlockHeader)
@@ -583,14 +604,16 @@ func TestLockPOLRelock(t *testing.T) {
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestLockPOLUnlock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	cs2, cs3, cs4 := vss[1], vss[2], vss[3]
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := config.GetInt("block_part_size")

	proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
	timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
	newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
	unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// everything done from perspective of cs1

@@ -609,7 +632,7 @@ func TestLockPOLUnlock(t *testing.T) {

	<-voteCh // prevote

	signAddVoteToFromMany(types.VoteTypePrevote, cs1, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)

	<-voteCh //precommit

@@ -619,14 +642,14 @@ func TestLockPOLUnlock(t *testing.T) {
	rs = cs1.GetRoundState()

	// add precommits from the rest
	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs4)
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
	propBlockParts := propBlock.MakePartSet()
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(cs2, cs3, cs4)
	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	re = <-timeoutWaitCh
@@ -639,7 +662,7 @@ func TestLockPOLUnlock(t *testing.T) {
	<-newRoundCh
	log.Notice("#### ONTO ROUND 1")
	/*
		Round2 (cs2, C) // B nil nil nil // nil nil nil _
		Round2 (vs2, C) // B nil nil nil // nil nil nil _

		cs1 unlocks!
	*/
@@ -656,7 +679,7 @@ func TestLockPOLUnlock(t *testing.T) {
	<-voteCh
	validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)
	// now let's add prevotes from everyone else for nil (a polka!)
	signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// the polka makes us unlock and precommit nil
	<-unlockCh
@@ -666,7 +689,7 @@ func TestLockPOLUnlock(t *testing.T) {
	// NOTE: since we don't relock on nil, the lock round is 0
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil)

	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
	<-newRoundCh
}

@@ -676,13 +699,15 @@ func TestLockPOLUnlock(t *testing.T) {
// then we see the polka from round 1 but shouldn't unlock
func TestLockPOLSafety1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	cs2, cs3, cs4 := vss[1], vss[2], vss[3]
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := config.GetInt("block_part_size")

	proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
	timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
	newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
@@ -696,7 +721,7 @@ func TestLockPOLSafety1(t *testing.T) {
	validatePrevote(t, cs1, 0, vss[0], propBlock.Hash())

	// the others sign a polka but we don't see it
	prevotes := signVoteMany(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet().Header(), cs2, cs3, cs4)
	prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)

	// before we time out into new round, set next proposer
	// and next proposal block
@@ -710,13 +735,13 @@ func TestLockPOLSafety1(t *testing.T) {
	log.Warn("old prop", "hash", fmt.Sprintf("%X", propBlock.Hash()))

	// we do see them precommit nil
	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash := propBlock.Hash()
	propBlockParts := propBlock.MakePartSet()
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(cs2, cs3, cs4)
	incrementRound(vs2, vs3, vs4)

	//XXX: this isn't guaranteed to get there before the timeoutPropose ...
	cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
@@ -747,18 +772,18 @@ func TestLockPOLSafety1(t *testing.T) {
	validatePrevote(t, cs1, 1, vss[0], propBlockHash)

	// now we see the others prevote for it, so we should lock on it
	signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash, propBlockParts.Header(), nil, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)

	<-voteCh // precommit

	// we should have precommitted
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)

	<-timeoutWaitCh

	incrementRound(cs2, cs3, cs4)
	incrementRound(vs2, vs3, vs4)

	<-newRoundCh

@@ -779,7 +804,7 @@ func TestLockPOLSafety1(t *testing.T) {
	newStepCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRoundStep(), 1)

	// add prevotes from the earlier round
	addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4)
	addVotes(cs1, prevotes...)

	log.Warn("Done adding prevotes!")

@@ -795,30 +820,33 @@ func TestLockPOLSafety1(t *testing.T) {
// don't see P0, lock on P1 at R1, don't unlock using P0 at R2
func TestLockPOLSafety2(t *testing.T) {
	cs1, vss := randConsensusState(4)
	cs2, cs3, cs4 := vss[1], vss[2], vss[3]
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := config.GetInt("block_part_size")

	proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
	timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
	newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
	unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// the block for R0: gets polkad but we miss it
	// (even though we signed it, shhh)
	_, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round)
	propBlockHash0 := propBlock0.Hash()
	propBlockParts0 := propBlock0.MakePartSet()
	propBlockParts0 := propBlock0.MakePartSet(partSize)

	// the others sign a polka but we don't see it
	prevotes := signVoteMany(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), cs2, cs3, cs4)
	prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)

	// the block for round 1
	prop1, propBlock1 := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
	prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash1 := propBlock1.Hash()
	propBlockParts1 := propBlock1.MakePartSet()
	propBlockParts1 := propBlock1.MakePartSet(partSize)
	propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()}

	incrementRound(cs2, cs3, cs4)
	incrementRound(vs2, vs3, vs4)

	cs1.updateRoundStep(0, RoundStepPrecommitWait)

@@ -833,28 +861,30 @@ func TestLockPOLSafety2(t *testing.T) {

	<-voteCh // prevote

	signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash1, propBlockParts1.Header(), nil, cs2, cs3, cs4)
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)

	<-voteCh // precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1)

	// add precommits from the rest
	signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs4)
	signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, propBlockHash1, propBlockParts1.Header(), nil)
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3)

	incrementRound(cs2, cs3, cs4)
	incrementRound(vs2, vs3, vs4)

	// timeout of precommit wait to new round
	<-timeoutWaitCh

	// in round 2 we see the polkad block from round 0
	newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0)
	if err := cs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
		panic(err)
	newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
	if err := vs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
		t.Fatal(err)
	}
	cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer")
	addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4) // add the pol votes

	// Add the pol votes
	addVotes(cs1, prevotes...)

	<-newRoundCh
	log.Notice("### ONTO Round 2")
@@ -885,13 +915,13 @@ func TestLockPOLSafety2(t *testing.T) {
/*
func TestSlashingPrevotes(t *testing.T) {
	cs1, vss := randConsensusState(2)
	cs2 := vss[1]
	vs2 := vss[1]


	proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
	newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
@@ -905,7 +935,7 @@ func TestSlashingPrevotes(t *testing.T) {
	// add one for a different block should cause us to go into prevote wait
	hash := rs.ProposalBlock.Hash()
	hash[0] = byte(hash[0]+1) % 255
	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlockParts.Header(), nil)
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2)

	<-timeoutWaitCh

@@ -913,20 +943,20 @@ func TestSlashingPrevotes(t *testing.T) {
	// away and ignore more prevotes (and thus fail to slash!)

	// add the conflicting vote
	signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
	signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)

	// XXX: Check for existence of Dupeout info
}

func TestSlashingPrecommits(t *testing.T) {
	cs1, vss := randConsensusState(2)
	cs2 := vss[1]
	vs2 := vss[1]


	proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
	timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
	newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, cs1.Height, 0)
|
||||
@@ -934,8 +964,8 @@ func TestSlashingPrecommits(t *testing.T) {
|
||||
re := <-proposalCh
|
||||
<-voteCh // prevote
|
||||
|
||||
// add prevote from cs2
|
||||
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
|
||||
// add prevote from vs2
|
||||
signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
|
||||
|
||||
<-voteCh // precommit
|
||||
|
||||
@@ -943,13 +973,13 @@ func TestSlashingPrecommits(t *testing.T) {
|
||||
// add one for a different block should cause us to go into prevote wait
|
||||
hash := rs.ProposalBlock.Hash()
|
||||
hash[0] = byte(hash[0]+1) % 255
|
||||
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlockParts.Header(),nil)
|
||||
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)
|
||||
|
||||
// NOTE: we have to send the vote for different block first so we don't just go into precommit round right
|
||||
// away and ignore more prevotes (and thus fail to slash!)
|
||||
|
||||
// add precommit from cs2
|
||||
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),nil)
|
||||
// add precommit from vs2
|
||||
signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
|
||||
|
||||
// XXX: Check for existence of Dupeout info
|
||||
}
|
||||
@@ -965,13 +995,15 @@ func TestSlashingPrecommits(t *testing.T) {
|
||||
// we receive a final precommit after going into next round, but others might have gone to commit already!
|
||||
func TestHalt1(t *testing.T) {
|
||||
cs1, vss := randConsensusState(4)
|
||||
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
|
||||
partSize := config.GetInt("block_part_size")
|
||||
|
||||
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
|
||||
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
|
||||
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
|
||||
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, cs1.Height, 0)
|
||||
@@ -979,23 +1011,23 @@ func TestHalt1(t *testing.T) {
|
||||
re := <-proposalCh
|
||||
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
|
||||
propBlock := rs.ProposalBlock
|
||||
propBlockParts := propBlock.MakePartSet()
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
|
||||
<-voteCh // prevote
|
||||
|
||||
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlock.Hash(), propBlockParts.Header(), nil, cs3, cs4)
|
||||
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
|
||||
<-voteCh // precommit
|
||||
|
||||
// the proposed block should now be locked and our precommit added
|
||||
validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())
|
||||
|
||||
// add precommits from the rest
|
||||
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, nil, types.PartSetHeader{}, nil) // didnt receive proposal
|
||||
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, propBlock.Hash(), propBlockParts.Header(), nil)
|
||||
// we receive this later, but cs3 might receive it earlier and with ours will go to commit!
|
||||
precommit4 := signVote(cs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())
|
||||
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal
|
||||
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
|
||||
// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
|
||||
precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())
|
||||
|
||||
incrementRound(cs2, cs3, cs4)
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
|
||||
// timeout to new round
|
||||
<-timeoutWaitCh
|
||||
@@ -1013,7 +1045,7 @@ func TestHalt1(t *testing.T) {
|
||||
validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash())
|
||||
|
||||
// now we receive the precommit from the previous round
|
||||
addVoteToFrom(cs1, cs4, precommit4)
|
||||
addVotes(cs1, precommit4)
|
||||
|
||||
// receiving that precommit should take us straight to commit
|
||||
<-newBlockCh
|
||||
|
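Taken together, the hunks above are one mechanical migration. A hedged summary of the rename, with call shapes inferred only from the call sites in this diff (the helper definitions themselves are not shown here):

```
// Old helpers (removed): "to/from" naming, a trailing nil placeholder, and cs*
// names for what are really validator stubs:
//   signAddVoteToFromMany(voteType, cs1, hash, header, nil, cs2, cs3, cs4)
//   signAddVoteToFrom(voteType, cs1, cs2, hash, header, nil)
//   addVoteToFromMany(cs1, votes, cs2, cs3, cs4)
// New helpers (added): the consensus state comes first, signers are variadic vs* stubs:
//   signAddVotes(cs1, voteType, hash, header, vs2, vs3, vs4)
//   addVotes(cs1, votes...)
// Block parts also take an explicit size now: MakePartSet(partSize) instead of MakePartSet().
```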
36 consensus/test_data/README.md (new file)
@@ -0,0 +1,36 @@
# Generating test data

To generate the data, run `build.sh`. See that script for more details.

Make sure to adjust the stepChanges in the testCases if the number of messages changes.
This sometimes happens for the `small_block2.cswal`, where the number of block parts changes between 4 and 5.

If you need to change the signatures, you can use a script as follows.
The `privBytes` value comes from `config/tendermint_test/...`:

```
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/tendermint/go-crypto"
)

func main() {
	signBytes, err := hex.DecodeString("7B22636861696E5F6964223A2274656E6465726D696E745F74657374222C22766F7465223A7B22626C6F636B5F68617368223A2242453544373939433846353044354645383533364334333932464443384537423342313830373638222C22626C6F636B5F70617274735F686561646572223A506172745365747B543A31204236323237323535464632307D2C22686569676874223A312C22726F756E64223A302C2274797065223A327D7D")
	if err != nil {
		panic(err)
	}
	privBytes, err := hex.DecodeString("27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8")
	if err != nil {
		panic(err)
	}
	privKey := crypto.PrivKeyEd25519{}
	copy(privKey[:], privBytes)
	signature := privKey.Sign(signBytes)
	fmt.Printf("Signature Bytes: %X\n", signature.Bytes())
}
```
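
To sanity-check the regenerated signature, the matching public key can verify it. A minimal continuation of `main()` above, assuming the `PubKey()`/`VerifyBytes` helpers of the vendored go-crypto package:

```
// Hypothetical check appended to main() above.
pubKey := privKey.PubKey()
if !pubKey.VerifyBytes(signBytes, signature) {
	panic("regenerated signature does not verify")
}
```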
58 consensus/test_data/build.sh (new file)
@@ -0,0 +1,58 @@
#! /bin/bash

cd $GOPATH/src/github.com/tendermint/tendermint

# specify a dir to copy
# NOTE: eventually we should replace with `tendermint init --test`
DIR=$HOME/.tendermint_test/consensus_state_test

# XXX: remove tendermint dir
rm -rf $HOME/.tendermint
cp -r $DIR $HOME/.tendermint

function reset(){
	rm -rf $HOME/.tendermint/data
	tendermint unsafe_reset_priv_validator
}

reset

# empty block
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint

# /q would print up to and including the match, then quit.
# /Q doesn't include the match.
# http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match
sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal

reset

# small block 1
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
kill -9 $PID

sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal

reset


# small block 2 (part size = 512)
echo "" >> ~/.tendermint/config.toml
echo "block_part_size = 512" >> ~/.tendermint/config.toml
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
kill -9 $PID

sed '/HEIGHT: 2/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal

reset
10 consensus/test_data/empty_block.cswal (new file)
@@ -0,0 +1,10 @@
#HEIGHT: 1
{"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.506Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114914148D83E0DC00000000000000114354594CBFC1A7BCA1AD0050ED6AA010023EADA390001000100000000","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"B64D0BB64B2E9AAFDD4EBEA679644F77AE774D69E3E2E1B042AB15FE4F84B1427AC6C8A25AFF58EA22011AE567FEA49D2EE7354382E915AD85BF40C58FA6130C"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"D83E968392D1BF09821E0D05079DAB5491CABD89BE128BD1CF573ED87148BA84667A56C0A069EFC90760F25EDAC62BC324DBB12EA63F44E6CB2D3500FE5E640F"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}
10 consensus/test_data/small_block1.cswal (new file)
File diff suppressed because one or more lines are too long
14 consensus/test_data/small_block2.cswal (new file)
@@ -0,0 +1,14 @@
#HEIGHT: 1
{"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011491414B3483A8400190000000000114926EA77D30A4D19866159DE7E58AA9461F90F9D10114354594CBFC1A7BCA1AD0050ED6AA010023EADA3900010190010D6162636431323D646362613132010D6162636431333D646362613133010D6162636431343D646362613134010D6162636431353D646362613135010D6162636431363D646362613136010D6162636431373D646362613137010D6162636431383D646362613138010D6162636431393D646362613139010D6162636432303D646362613230010D6162636432313D646362613231010D6162636432323D646362613232010D6162636432333D646362613233010D6162636432343D646362613234010D6162636432353D646362613235010D6162636432363D646362613236010D6162636432373D646362613237010D6162636432383D646362613238010D6162636432393D646362613239010D6162636433303D646362613330010D6162636433313D646362613331010D6162636433323D646362613332010D6162636433333D646362613333010D6162636433343D646362613334010D6162636433353D646362613335010D6162636433363D646362613336010D6162636433373D646362613337010D6162636433383D646362613338010D6162636433393D646362613339010D6162636434303D","proof":{"aunts":["C9FBD66B63A976638196323F5B93494BDDFC9EED","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"646362613430010D6162636434313D646362613431010D6162636434323D646362613432010D6162636434333D646362613433010D6162636434343D646362613434010D6162636434353D646362613435010D6162636434363D646362613436010D6162636434373D646362613437010D6162636434383D646362613438010D6162636434393D646362613439010D6162636435303D646362613530010D6162636435313D646362613531010D6162636435323D646362613532010D6162636435333D646362613533010D6162636435343D646362613534010D6162636435353D646362613535010D6162636435363D646362613536010D6162636435373D646362613537010D6162636435383D646362613538010D6162636435393D646362613539010D6162636436303D646362613630010D6162636436313D646362613631010D6162636436323D646362613632010D6162636436333D646362613633010D6162636436343D646362613634010D6162636436353D646362613635010D6162636436363D646362613636010D6162636436373D646362613637010D6162636436383D646362613638010D6162636436393D646362613639010D6162636437303D646362613730010D6162636437313D646362613731010D6162636437323D646362613732010D6162636437333D646362613733010D6162636437343D6463","proof":{"aunts":["D7FB03B935B77C322064F8277823CDB5C7018597","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"62613734010D6162636437353D646362613735010D6162636437363D646362613736010D6162636437373D646362613737010D6162636437383D646362613738010D6162636437393D646362613739010D6162636438303D646362613830010D6162636438313D646362613831010D6162636438323D646362613832010D6162636438333D646362613833010D6162636438343D646362613834010D6162636438353D646362613835010D6162636438363D646362613836010D6162636438373D646362613837010D6162636438383D646362613838010D6162636438393D646362613839010D6162636439303D646362613930010D6162636439313D646362613931010D6162636439323D646362613932010D6162636439333D646362613933010D6162636439343D646362613934010D6162636439353D646362613935010D6162636439363D646362613936010D6162636439373D646362613937010D6162636439383D646362613938010D6162636439393D646362613939010F616263643130303D64636261313030010F616263643130313D64636261313031010F616263643130323D64636261313032010F616263643130333D64636261313033010F616263643130343D64636261313034010F616263643130353D64636261313035010F616263643130363D64636261313036010F616263643130373D64636261","proof":{"aunts":["A607D9BF5107E6C9FD19B6928D9CC7714B0730E4","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":3,"bytes":"313037010F616263643130383D64636261313038010F616263643130393D64636261313039010F616263643131303D64636261313130010F616263643131313D64636261313131010F616263643131323D64636261313132010F616263643131333D64636261313133010F616263643131343D64636261313134010F616263643131353D64636261313135010F616263643131363D64636261313136010F616263643131373D64636261313137010F616263643131383D64636261313138010F616263643131393D64636261313139010F616263643132303D64636261313230010F616263643132313D64636261313231010F616263643132323D64636261313232010F616263643132333D64636261313233010F616263643132343D64636261313234010F616263643132353D64636261313235010F616263643132363D64636261313236010F616263643132373D64636261313237010F616263643132383D64636261313238010F616263643132393D64636261313239010F616263643133303D64636261313330010F616263643133313D64636261313331010F616263643133323D64636261313332010F616263643133333D64636261313333010F616263643133343D64636261313334010F616263643133353D64636261313335010F616263643133363D64636261313336010F616263643133373D646362613133","proof":{"aunts":["0FD794B3506B9E92CDE3703F7189D42167E77095","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":4,"bytes":"37010F616263643133383D64636261313338010F616263643133393D64636261313339010F616263643134303D64636261313430010F616263643134313D64636261313431010F616263643134323D64636261313432010F616263643134333D64636261313433010F616263643134343D64636261313434010F616263643134353D64636261313435010F616263643134363D64636261313436010F616263643134373D64636261313437010F616263643134383D64636261313438010F616263643134393D64636261313439010F616263643135303D64636261313530010F616263643135313D64636261313531010F616263643135323D64636261313532010F616263643135333D64636261313533010F616263643135343D64636261313534010F616263643135353D646362613135350100000000","proof":{"aunts":["50CBDC078A660EAE3442BA355BE10EE0D04408D1","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"E815E0A63B7EEE7894DE2D72372A7C393434AC8ACCC46B60C628910F73351806D55A59994F08B454BFD71EDAA0CA95733CA47E37FFDAF9AAA2431A8160176E01"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"9AAC3F3A118EE039EB460E9E5308D490D671C7490309BD5D62B5F392205C7E420DFDAF90F08294FF36BE8A9AA5CC203C1F2088B42D2BB8EE40A45F2BB5C54D0A"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.648Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}
127 consensus/ticker.go (new file)
@@ -0,0 +1,127 @@
package consensus

import (
	"time"

	. "github.com/tendermint/go-common"
)

var (
	tickTockBufferSize = 10
)

// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
	Start() (bool, error)
	Stop() bool
	Chan() <-chan timeoutInfo       // on which to receive a timeout
	ScheduleTimeout(ti timeoutInfo) // reset the timer
}

// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
	BaseService

	timer    *time.Timer
	tickChan chan timeoutInfo
	tockChan chan timeoutInfo
}

func NewTimeoutTicker() TimeoutTicker {
	tt := &timeoutTicker{
		timer:    time.NewTimer(0),
		tickChan: make(chan timeoutInfo, tickTockBufferSize),
		tockChan: make(chan timeoutInfo, tickTockBufferSize),
	}
	tt.stopTimer() // don't want to fire until the first scheduled timeout
	tt.BaseService = *NewBaseService(log, "TimeoutTicker", tt)
	return tt
}

func (t *timeoutTicker) OnStart() error {
	t.BaseService.OnStart()

	go t.timeoutRoutine()

	return nil
}

func (t *timeoutTicker) OnStop() {
	t.BaseService.OnStop()
	t.stopTimer()
}

func (t *timeoutTicker) Chan() <-chan timeoutInfo {
	return t.tockChan
}

// The timeoutRoutine is always available to read from tickChan (it won't block).
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
	t.tickChan <- ti
}

//-------------------------------------------------------------

// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
	// Stop() returns false if it was already fired or was stopped
	if !t.timer.Stop() {
		select {
		case <-t.timer.C:
		default:
			log.Debug("Timer already stopped")
		}
	}
}

// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
	log.Debug("Starting timeout routine")
	var ti timeoutInfo
	for {
		select {
		case newti := <-t.tickChan:
			log.Debug("Received tick", "old_ti", ti, "new_ti", newti)

			// ignore ticks for old height/round/step
			if newti.Height < ti.Height {
				continue
			} else if newti.Height == ti.Height {
				if newti.Round < ti.Round {
					continue
				} else if newti.Round == ti.Round {
					if ti.Step > 0 && newti.Step <= ti.Step {
						continue
					}
				}
			}

			// stop the last timer
			t.stopTimer()

			// update timeoutInfo and reset timer
			// NOTE time.Timer allows duration to be non-positive
			ti = newti
			t.timer.Reset(ti.Duration)
			log.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
		case <-t.timer.C:
			log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
			// The goroutine here guarantees timeoutRoutine doesn't block.
			// Determinism comes from playback in the receiveRoutine.
			// We can eliminate it by merging the timeoutRoutine into receiveRoutine
			// and managing the timeouts ourselves with a millisecond ticker
			go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
		case <-t.Quit:
			return
		}
	}
}
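A hedged usage sketch of the ticker, inside the consensus package; the `timeoutInfo` fields match the ones logged in this file:

```
// Schedule a 100ms timeout for (height=1, round=0, step=1) and wait for it to fire.
// A tick for an older height/round/step than the last one seen would be dropped.
ticker := NewTimeoutTicker()
ticker.Start()
ticker.ScheduleTimeout(timeoutInfo{Duration: 100 * time.Millisecond, Height: 1, Round: 0, Step: 1})
ti := <-ticker.Chan() // fires roughly 100ms later, carrying the same height/round/step
log.Info("Timeout fired", "height", ti.Height, "round", ti.Round, "step", ti.Step)
ticker.Stop()
```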
157 consensus/wal.go
@@ -1,10 +1,9 @@
 package consensus

 import (
-	"bufio"
-	"os"
 	"time"

+	auto "github.com/tendermint/go-autofile"
 	. "github.com/tendermint/go-common"
 	"github.com/tendermint/go-wire"
 	"github.com/tendermint/tendermint/types"
@@ -13,15 +12,15 @@ import (
 //--------------------------------------------------------
 // types and functions for saving consensus messages

-type ConsensusLogMessage struct {
-	Time time.Time                    `json:"time"`
-	Msg  ConsensusLogMessageInterface `json:"msg"`
+type TimedWALMessage struct {
+	Time time.Time  `json:"time"`
+	Msg  WALMessage `json:"msg"`
 }

-type ConsensusLogMessageInterface interface{}
+type WALMessage interface{}

 var _ = wire.RegisterInterface(
-	struct{ ConsensusLogMessageInterface }{},
+	struct{ WALMessage }{},
 	wire.ConcreteType{types.EventDataRoundState{}, 0x01},
 	wire.ConcreteType{msgInfo{}, 0x02},
 	wire.ConcreteType{timeoutInfo{}, 0x03},
@@ -35,111 +34,79 @@ var _ = wire.RegisterInterface(
 // TODO: currently the wal is overwritten during replay catchup
 // give it a mode so it's either reading or appending - must read to end to start appending again
 type WAL struct {
-	fp     *os.File
-	exists bool // if the file already existed (restarted process)
-
-	done chan struct{}
+	BaseService
+
+	group *auto.Group
 	light bool // ignore block parts
 }

-func NewWAL(file string, light bool) (*WAL, error) {
-	var walExists bool
-	if _, err := os.Stat(file); err == nil {
-		walExists = true
-	}
-	fp, err := os.OpenFile(file, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
+func NewWAL(walDir string, light bool) (*WAL, error) {
+	group, err := auto.OpenGroup(walDir + "/wal")
 	if err != nil {
 		return nil, err
 	}
-	return &WAL{
-		fp:     fp,
-		exists: walExists,
-		done:   make(chan struct{}),
-		light:  light,
-	}, nil
+	wal := &WAL{
+		group: group,
+		light: light,
+	}
+	wal.BaseService = *NewBaseService(log, "WAL", wal)
+	_, err = wal.Start()
+	return wal, err
 }

-func (wal *WAL) Exists() bool {
-	if wal == nil {
-		log.Warn("consensus msg log is nil")
-		return false
+func (wal *WAL) OnStart() error {
+	wal.BaseService.OnStart()
+	size, err := wal.group.Head.Size()
+	if err != nil {
+		return err
+	} else if size == 0 {
+		wal.writeHeight(1)
 	}
-	return wal.exists
+	_, err = wal.group.Start()
+	return err
 }

+func (wal *WAL) OnStop() {
+	wal.BaseService.OnStop()
+	wal.group.Stop()
+}
+
 // called in newStep and for each pass in receiveRoutine
-func (wal *WAL) Save(clm ConsensusLogMessageInterface) {
-	if wal != nil {
-		if wal.light {
-			// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
-			if mi, ok := clm.(msgInfo); ok {
-				_ = mi
-				if mi.PeerKey != "" {
-					return
-				}
-			}
-		}
-		var n int
-		var err error
-		wire.WriteJSON(ConsensusLogMessage{time.Now(), clm}, wal.fp, &n, &err)
-		wire.WriteTo([]byte("\n"), wal.fp, &n, &err) // one message per line
-		if err != nil {
-			PanicQ(Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, clm))
-		}
-	}
-}
-
-// Must not be called concurrently with a write.
-func (wal *WAL) Close() {
-	if wal != nil {
-		wal.fp.Close()
-	}
-	wal.done <- struct{}{}
-}
-
-func (wal *WAL) Wait() {
-	<-wal.done
-}
-
-func (wal *WAL) SeekFromEnd(found func([]byte) bool) (nLines int, err error) {
-	var current int64
-	// start at the end
-	current, err = wal.fp.Seek(0, 2)
-	if err != nil {
+func (wal *WAL) Save(wmsg WALMessage) {
+	if wal == nil {
 		return
 	}
-
-	// backup until we find the right line
-	// current is how far we are from the beginning
-	for {
-		current -= 1
-		if current < 0 {
-			wal.fp.Seek(0, 0) // back to beginning
-			return
-		}
-		// backup one and read a new byte
-		if _, err = wal.fp.Seek(current, 0); err != nil {
-			return
-		}
-		b := make([]byte, 1)
-		if _, err = wal.fp.Read(b); err != nil {
-			return
-		}
-		if b[0] == '\n' || len(b) == 0 {
-			nLines += 1
-			// read a full line
-			reader := bufio.NewReader(wal.fp)
-			lineBytes, _ := reader.ReadBytes('\n')
-			if len(lineBytes) == 0 {
-				continue
-			}
-
-			if found(lineBytes) {
-				wal.fp.Seek(0, 1) // (?)
-				wal.fp.Seek(current, 0)
+	if wal.light {
+		// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
+		if mi, ok := wmsg.(msgInfo); ok {
+			if mi.PeerKey != "" {
+				return
 			}
 		}
 	}
+	// Write #HEIGHT: XYZ if new height
+	if edrs, ok := wmsg.(types.EventDataRoundState); ok {
+		if edrs.Step == RoundStepNewHeight.String() {
+			wal.writeHeight(edrs.Height)
+		}
+	}
+	// Write the wal message
+	var wmsgBytes = wire.JSONBytes(TimedWALMessage{time.Now(), wmsg})
+	err := wal.group.WriteLine(string(wmsgBytes))
+	if err != nil {
+		PanicQ(Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, wmsg))
+	}
+	// TODO: only flush when necessary
+	if err := wal.group.Flush(); err != nil {
+		PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
+	}
+}
+
+func (wal *WAL) writeHeight(height int) {
+	wal.group.WriteLine(Fmt("#HEIGHT: %v", height))
+
+	// TODO: only flush when necessary
+	if err := wal.group.Flush(); err != nil {
+		PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
+	}
+}
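A hedged sketch of the new API as it appears in this diff (directory-based `NewWAL`, one `Save` per message); the directory path is an example:

```
// NewWAL opens walDir/wal via go-autofile and starts the service; an empty WAL
// gets an initial "#HEIGHT: 1" line from OnStart.
wal, err := NewWAL("/tmp/cs_wal_dir", false) // false: not light mode
if err != nil {
	panic(err)
}
// Appends {"time":...,"msg":[3,{...}]} as one line and flushes the group.
wal.Save(timeoutInfo{Duration: time.Second, Height: 1, Round: 0, Step: 1})
wal.Stop() // OnStop stops the underlying group
```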
@@ -1,78 +0,0 @@
package consensus

import (
	"io/ioutil"
	"os"
	"path"
	"strings"
	"testing"

	. "github.com/tendermint/go-common"
)

var testTxt = `{"time":"2016-01-16T04:42:00.390Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-01-16T04:42:00.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":1,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"4CC6845A128E723A299B470CCBB2A158612AA51321447F6492F3DA57D135C27FCF4124B3B19446A248252BDA45B152819C76AAA5FD35E1C07091885CE6955E05"}}],"peer_key":""}]}
{"time":"2016-01-16T04:42:00.392Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-01-16T04:42:00.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":2,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"1B9924E010F47E0817695DFE462C531196E5A12632434DE12180BBA3EFDAD6B3960FDB9357AFF085EB61729A7D4A6AD8408555D7569C87D9028F280192FD4E05"}}],"peer_key":""}]}
{"time":"2016-01-16T04:42:00.393Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepCommit"}]}
{"time":"2016-01-16T04:42:00.395Z","msg":[1,{"height":28220,"round":0,"step":"RoundStepNewHeight"}]}`

func TestSeek(t *testing.T) {
	f, err := ioutil.TempFile(os.TempDir(), "seek_test_")
	if err != nil {
		panic(err)
	}

	stat, _ := f.Stat()
	name := stat.Name()

	_, err = f.WriteString(testTxt)
	if err != nil {
		panic(err)
	}
	f.Close()

	wal, err := NewWAL(path.Join(os.TempDir(), name), config.GetBool("cswal_light"))
	if err != nil {
		panic(err)
	}

	keyWord := "Precommit"
	n, err := wal.SeekFromEnd(func(b []byte) bool {
		if strings.Contains(string(b), keyWord) {
			return true
		}
		return false
	})
	if err != nil {
		panic(err)
	}

	// confirm n
	spl := strings.Split(testTxt, "\n")
	var i int
	var s string
	for i, s = range spl {
		if strings.Contains(s, keyWord) {
			break
		}
	}
	// n is lines from the end.
	spl = spl[i:]
	if n != len(spl) {
		panic(Fmt("Wrong nLines. Got %d, expected %d", n, len(spl)))
	}

	b, err := ioutil.ReadAll(wal.fp)
	if err != nil {
		panic(err)
	}
	// first char is a \n
	spl2 := strings.Split(strings.Trim(string(b), "\n"), "\n")
	for i, s := range spl {
		if s != spl2[i] {
			panic(Fmt("Mismatch. Got %s, expected %s", spl2[i], s))
		}
	}

}
134 glide.lock (generated)
@@ -1,36 +1,38 @@
-hash: d87a1fe0061d41c1e6ec78d405d54ae321e75f4bff22b38d19d3255bbd17f21e
-updated: 2016-09-10T18:02:24.023038691-04:00
+hash: dcaf3fb1290b0d7942c86f0644a7431ac313247936eab9515b1ade9ffe579848
+updated: 2017-01-13T00:30:55.237750829-05:00
 imports:
 - name: github.com/btcsuite/btcd
-  version: 2ef82e7db35dc8c499fa9091d768dc99bbaff893
+  version: 153dca5c1e4b5d1ea1523592495e5bedfa503391
   subpackages:
   - btcec
 - name: github.com/btcsuite/fastsha256
   version: 637e656429416087660c84436a2a035d69d54e2e
 - name: github.com/BurntSushi/toml
   version: 99064174e013895bbd9b025c31100bd1d9b590ca
 - name: github.com/ebuchman/fail-test
   version: c1eddaa09da2b4017351245b0d43234955276798
 - name: github.com/go-stack/stack
   version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
 - name: github.com/gogo/protobuf
-  version: a11c89fbb0ad4acfa8abc4a4d5f7e27c477169b1
+  version: f9114dace7bd920b32f943b3c73fafbcbab2bf31
   subpackages:
   - proto
 - name: github.com/golang/protobuf
-  version: 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
+  version: 8ee79997227bf9b34611aee7946ae64735e6fd93
   subpackages:
   - proto
 - name: github.com/golang/snappy
   version: d9eb7a3d35ec988b8585d4a0068e462c27d28380
 - name: github.com/gorilla/websocket
-  version: a69d25be2fe2923a97c2af6849b2f52426f68fc0
+  version: 17634340a83afe0cab595e40fbc63f6ffa1d8915
 - name: github.com/jmhodges/levigo
   version: c42d9e0ca023e2198120196f842701bb4c55d7b9
 - name: github.com/mattn/go-colorable
-  version: ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8
+  version: d228849504861217f796da67fae4f6e347643f15
 - name: github.com/mattn/go-isatty
-  version: 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8
+  version: 30a891c33c7cde7b02a981314b4228ec99380cca
 - name: github.com/spf13/pflag
-  version: 6fd2ff4ff8dfcdf5556fbdc0ac0284408274b1a7
+  version: 25f8b5b07aece3207895bf19f7ab517eb3b22a40
 - name: github.com/syndtr/goleveldb
-  version: 6ae1797c0b42b9323fc27ff7dcf568df88f2f33d
+  version: 23851d93a2292dcc56e71a18ec9e0624d84a0f65
   subpackages:
   - leveldb
   - leveldb/cache
@@ -44,49 +46,8 @@ imports:
   - leveldb/storage
   - leveldb/table
   - leveldb/util
-- name: github.com/tendermint/ed25519
-  version: 1f52c6f8b8a5c7908aff4497c186af344b428925
-  subpackages:
-  - edwards25519
-  - extra25519
-- name: github.com/tendermint/flowcontrol
-  version: 84d9671090430e8ec80e35b339907e0579b999eb
-- name: github.com/tendermint/go-clist
-  version: 3baa390bbaf7634251c42ad69a8682e7e3990552
-- name: github.com/tendermint/go-common
-  version: 47e06734f6ee488cc2e61550a38642025e1d4227
-  subpackages:
-  - test
-- name: github.com/tendermint/go-config
-  version: e64b424499acd0eb9856b88e10c0dff41628c0d6
-- name: github.com/tendermint/go-crypto
-  version: 4b11d62bdb324027ea01554e5767b71174680ba0
-- name: github.com/tendermint/go-db
-  version: 31fdd21c7eaeed53e0ea7ca597fb1e960e2988a5
-- name: github.com/tendermint/go-events
-  version: 48fa21511b259278b871a37b6951da2d5bef698d
-- name: github.com/tendermint/go-logger
-  version: cefb3a45c0bf3c493a04e9bcd9b1540528be59f2
-- name: github.com/tendermint/go-merkle
-  version: 05042c6ab9cad51d12e4cecf717ae68e3b1409a8
-- name: github.com/tendermint/go-p2p
-  version: f508f3f20b5bb36f03d3bc83647b7a92425139d1
-  subpackages:
-  - upnp
-- name: github.com/tendermint/go-rpc
-  version: 479510be0e80dd9e5d6b1f941adad168df0af85f
-  subpackages:
-  - client
-  - server
-  - types
-- name: github.com/tendermint/go-wire
-  version: 3b0adbc86ed8425eaed98516165b6788d9f4de7a
-- name: github.com/tendermint/log15
-  version: 9545b249b3aacafa97f79e0838b02b274adc6f5f
-  subpackages:
-  - term
-- name: github.com/tendermint/tmsp
-  version: ead192adbbbf85ac581cf775b18ae70d59f86457
+- name: github.com/tendermint/abci
+  version: 699d45bc678865b004b90213bf88a950f420973b
   subpackages:
   - client
   - example/counter
@@ -94,8 +55,53 @@ imports:
   - example/nil
   - server
   - types
+- name: github.com/tendermint/ed25519
+  version: 1f52c6f8b8a5c7908aff4497c186af344b428925
+  subpackages:
+  - edwards25519
+  - extra25519
+- name: github.com/tendermint/go-autofile
+  version: 0416e0aa9c68205aa44844096f9f151ada9d0405
+- name: github.com/tendermint/go-clist
+  version: 3baa390bbaf7634251c42ad69a8682e7e3990552
+- name: github.com/tendermint/go-common
+  version: e289af53b6bf6af28da129d9ef64389a4cf7987f
+  subpackages:
+  - test
+- name: github.com/tendermint/go-config
+  version: e64b424499acd0eb9856b88e10c0dff41628c0d6
+- name: github.com/tendermint/go-crypto
+  version: 4b11d62bdb324027ea01554e5767b71174680ba0
+- name: github.com/tendermint/go-db
+  version: 72f6dacd22a686cdf7fcd60286503e3aceda77ba
+- name: github.com/tendermint/go-events
+  version: fddee66d90305fccb6f6d84d16c34fa65ea5b7f6
+- name: github.com/tendermint/go-flowrate
+  version: a20c98e61957faa93b4014fbd902f20ab9317a6a
+  subpackages:
+  - flowrate
+- name: github.com/tendermint/go-logger
+  version: cefb3a45c0bf3c493a04e9bcd9b1540528be59f2
+- name: github.com/tendermint/go-merkle
+  version: 7a86b4486f2cd84ac885c5bbc609fdee2905f5d1
+- name: github.com/tendermint/go-p2p
+  version: 3d98f675f30dc4796546b8b890f895926152fa8d
+  subpackages:
+  - upnp
+- name: github.com/tendermint/go-rpc
+  version: fcea0cda21f64889be00a0f4b6d13266b1a76ee7
+  subpackages:
+  - client
+  - server
+  - types
+- name: github.com/tendermint/go-wire
+  version: 2f3b7aafe21c80b19b6ee3210ecb3e3d07c7a471
+- name: github.com/tendermint/log15
+  version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6
+  subpackages:
+  - term
 - name: golang.org/x/crypto
-  version: aa2481cbfe81d911eb62b642b7a6b5ec58bbea71
+  version: 7c6cc321c680f03b9ef0764448e780704f486b51
   subpackages:
   - curve25519
   - nacl/box
@@ -106,20 +112,28 @@ imports:
   - ripemd160
   - salsa20/salsa
 - name: golang.org/x/net
-  version: cfe3c2a7525b50c3d707256e371c90938cfef98a
+  version: 60c41d1de8da134c05b7b40154a9a82bf5b7edb9
   subpackages:
   - context
   - http2
   - http2/hpack
   - idna
   - internal/timeseries
   - lex/httplex
   - trace
 - name: golang.org/x/sys
-  version: 30de6d19a3bd89a5f38ae4028e23aaa5582648af
+  version: d75a52659825e75fff6158388dddc6a5b04f9ba5
   subpackages:
   - unix
+- name: golang.org/x/text
+  version: 44f4f658a783b0cee41fe0a23b8fc91d9c120558
+  subpackages:
+  - secure/bidirule
+  - transform
+  - unicode/bidi
+  - unicode/norm
 - name: google.golang.org/grpc
-  version: 28707e14b1d2b2f5da81474dea2790d71e526987
+  version: 50955793b0183f9de69bd78e2ec251cf20aab121
   subpackages:
   - codes
   - credentials
@@ -128,5 +142,7 @@ imports:
   - metadata
   - naming
   - peer
+  - stats
+  - tap
   - transport
 testImports: []
19 glide.yaml
@@ -6,7 +6,8 @@ import:
 - package: github.com/gorilla/websocket
 - package: github.com/spf13/pflag
 - package: github.com/tendermint/ed25519
-- package: github.com/tendermint/flowcontrol
+- package: github.com/tendermint/go-flowrate
+- package: github.com/tendermint/go-autofile
 - package: github.com/tendermint/go-clist
 - package: github.com/tendermint/go-common
 - package: github.com/tendermint/go-config
@@ -16,21 +17,13 @@ import:
 - package: github.com/tendermint/go-logger
 - package: github.com/tendermint/go-merkle
 - package: github.com/tendermint/go-p2p
   subpackages:
   - upnp
 - package: github.com/tendermint/go-rpc
   subpackages:
   - client
   - server
   - types
 - package: github.com/tendermint/go-wire
 - package: github.com/tendermint/log15
-- package: github.com/tendermint/tmsp
-  subpackages:
-  - client
-  - example/dummy
-  - example/nil
-  - types
+- package: github.com/tendermint/abci
 - package: golang.org/x/crypto
   subpackages:
   - ripemd160
-- package: github.com/tendermint/go-flowrate
-  subpackages:
-  - flowrate
@@ -7,12 +7,13 @@ import (
 	"sync/atomic"
 	"time"

+	auto "github.com/tendermint/go-autofile"
 	"github.com/tendermint/go-clist"
 	. "github.com/tendermint/go-common"
 	cfg "github.com/tendermint/go-config"
 	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
-	tmsp "github.com/tendermint/tmsp/types"
+	abci "github.com/tendermint/abci/types"
 )

 /*
@@ -39,7 +40,7 @@ Garbage collection of old elements from mempool.txs is handled via
 the DetachPrev() call, which makes old elements unreachable by the
 peer broadcastTxRoutine(), so they are automatically garbage collected.

-TODO: Better handle tmsp client errors. (make it automatically handle connection errors)
+TODO: Better handle abci client errors. (make it automatically handle connection errors)

 */

@@ -59,8 +60,10 @@ type Mempool struct {

 	// Keep a cache of already-seen txs.
 	// This reduces the pressure on the proxyApp.
-	cacheMap  map[string]struct{}
-	cacheList *list.List // to remove oldest tx when cache gets too big
+	cache *txCache
+
+	// A log of mempool txs
+	wal *auto.AutoFile
 }

 func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
@@ -74,13 +77,30 @@ func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
 		recheckCursor: nil,
 		recheckEnd:    nil,

-		cacheMap:  make(map[string]struct{}, cacheSize),
-		cacheList: list.New(),
+		cache: newTxCache(cacheSize),
 	}
+	mempool.initWAL()
 	proxyAppConn.SetResponseCallback(mempool.resCb)
 	return mempool
 }

+func (mem *Mempool) initWAL() {
+	walDir := mem.config.GetString("mempool_wal_dir")
+	if walDir != "" {
+		err := EnsureDir(walDir, 0700)
+		if err != nil {
+			log.Error("Error ensuring Mempool wal dir", "error", err)
+			PanicSanity(err)
+		}
+		af, err := auto.OpenAutoFile(walDir + "/wal")
+		if err != nil {
+			log.Error("Error opening Mempool wal file", "error", err)
+			PanicSanity(err)
+		}
+		mem.wal = af
+	}
+}
+
 // consensus must be able to hold lock to safely update
 func (mem *Mempool) Lock() {
 	mem.proxyMtx.Lock()
@@ -100,8 +120,7 @@ func (mem *Mempool) Flush() {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()

-	mem.cacheMap = make(map[string]struct{}, cacheSize)
-	mem.cacheList.Init()
+	mem.cache.Reset()

 	for e := mem.txs.Front(); e != nil; e = e.Next() {
 		mem.txs.Remove(e)
@@ -120,17 +139,17 @@ func (mem *Mempool) TxsFrontWait() *clist.CElement {
 // cb: A callback from the CheckTx command.
 // It gets called from another goroutine.
 // CONTRACT: Either cb will get called, or err returned.
-func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
+func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()

 	// CACHE
-	if _, exists := mem.cacheMap[string(tx)]; exists {
+	if mem.cache.Exists(tx) {
 		if cb != nil {
-			cb(&tmsp.Response{
-				Value: &tmsp.Response_CheckTx{
-					&tmsp.ResponseCheckTx{
-						Code: tmsp.CodeType_BadNonce, // TODO or duplicate tx
+			cb(&abci.Response{
+				Value: &abci.Response_CheckTx{
+					&abci.ResponseCheckTx{
+						Code: abci.CodeType_BadNonce, // TODO or duplicate tx
 						Log:  "Duplicate transaction (ignored)",
 					},
 				},
@@ -138,18 +157,17 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
 		}
 		return nil
 	}
-	if mem.cacheList.Len() >= cacheSize {
-		popped := mem.cacheList.Front()
-		poppedTx := popped.Value.(types.Tx)
-		// NOTE: the tx may have already been removed from the map
-		// but deleting a non-existent element is fine
-		delete(mem.cacheMap, string(poppedTx))
-		mem.cacheList.Remove(popped)
-	}
-	mem.cacheMap[string(tx)] = struct{}{}
-	mem.cacheList.PushBack(tx)
+	mem.cache.Push(tx)
 	// END CACHE

+	// WAL
+	if mem.wal != nil {
+		// TODO: Notify administrators when WAL fails
+		mem.wal.Write([]byte(tx))
+		mem.wal.Write([]byte("\n"))
+	}
+	// END WAL
+
 	// NOTE: proxyAppConn may error if tx buffer is full
 	if err = mem.proxyAppConn.Error(); err != nil {
 		return err
@@ -162,15 +180,8 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
 	return nil
 }

-func (mem *Mempool) removeTxFromCacheMap(tx []byte) {
-	mem.proxyMtx.Lock()
-	// NOTE tx not removed from cacheList
-	delete(mem.cacheMap, string(tx))
-	mem.proxyMtx.Unlock()
-}
-
-// TMSP callback function
-func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
+// ABCI callback function
+func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) {
 	if mem.recheckCursor == nil {
 		mem.resCbNormal(req, res)
 	} else {
@@ -178,10 +189,10 @@ func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
 	}
 }

-func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
+func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
 	switch r := res.Value.(type) {
-	case *tmsp.Response_CheckTx:
-		if r.CheckTx.Code == tmsp.CodeType_OK {
+	case *abci.Response_CheckTx:
+		if r.CheckTx.Code == abci.CodeType_OK {
 			mem.counter++
 			memTx := &mempoolTx{
 				counter: mem.counter,
@@ -194,9 +205,7 @@ func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
 			log.Info("Bad Transaction", "res", r)

 			// remove from cache (it might be good later)
-			// note this is an async callback,
-			// so we need to grab the lock in removeTxFromCacheMap
-			mem.removeTxFromCacheMap(req.GetCheckTx().Tx)
+			mem.cache.Remove(req.GetCheckTx().Tx)

 			// TODO: handle other retcodes
 		}
@@ -205,15 +214,15 @@ func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
 	}
 }

-func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
+func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
 	switch r := res.Value.(type) {
-	case *tmsp.Response_CheckTx:
+	case *abci.Response_CheckTx:
 		memTx := mem.recheckCursor.Value.(*mempoolTx)
 		if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) {
 			PanicSanity(Fmt("Unexpected tx response from proxy during recheck\n"+
 				"Expected %X, got %X", r.CheckTx.Data, memTx.tx))
 		}
-		if r.CheckTx.Code == tmsp.CodeType_OK {
+		if r.CheckTx.Code == abci.CodeType_OK {
 			// Good, nothing to do.
 		} else {
 			// Tx became invalidated due to newly committed block.
@@ -221,7 +230,7 @@ func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
 			mem.recheckCursor.DetachPrev()

 			// remove from cache (it might be good later)
-			mem.removeTxFromCacheMap(req.GetCheckTx().Tx)
+			mem.cache.Remove(req.GetCheckTx().Tx)
 		}
 		if mem.recheckCursor == mem.recheckEnd {
 			mem.recheckCursor = nil
@@ -239,7 +248,7 @@ func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
 }

 // Get the valid transactions remaining
-// If maxTxs is 0, there is no cap.
+// If maxTxs is -1, there is no cap on returned transactions.
 func (mem *Mempool) Reap(maxTxs int) []types.Tx {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()
@@ -273,8 +282,7 @@ func (mem *Mempool) collectTxs(maxTxs int) []types.Tx {
 // NOTE: this should be called *after* block is committed by consensus.
 // NOTE: unsafe; Lock/Unlock must be managed by caller
 func (mem *Mempool) Update(height int, txs []types.Tx) {
-	// mem.proxyMtx.Lock()
-	// defer mem.proxyMtx.Unlock()
+	// TODO: check err ?
 	mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx

 	// First, create a lookup map of txns in new txs.
@@ -348,3 +356,62 @@ type mempoolTx struct {
 func (memTx *mempoolTx) Height() int {
 	return int(atomic.LoadInt64(&memTx.height))
 }
+
+//--------------------------------------------------------------------------------
+
+type txCache struct {
+	mtx  sync.Mutex
+	size int
+	map_ map[string]struct{}
+	list *list.List // to remove oldest tx when cache gets too big
+}
+
+func newTxCache(cacheSize int) *txCache {
+	return &txCache{
+		size: cacheSize,
+		map_: make(map[string]struct{}, cacheSize),
+		list: list.New(),
+	}
+}
+
+func (cache *txCache) Reset() {
+	cache.mtx.Lock()
+	cache.map_ = make(map[string]struct{}, cacheSize)
+	cache.list.Init()
+	cache.mtx.Unlock()
+}
+
+func (cache *txCache) Exists(tx types.Tx) bool {
+	cache.mtx.Lock()
+	_, exists := cache.map_[string(tx)]
+	cache.mtx.Unlock()
+	return exists
+}
+
+// Returns false if tx is in cache.
+func (cache *txCache) Push(tx types.Tx) bool {
+	cache.mtx.Lock()
+	defer cache.mtx.Unlock()
+
+	if _, exists := cache.map_[string(tx)]; exists {
+		return false
+	}
+
+	if cache.list.Len() >= cache.size {
+		popped := cache.list.Front()
+		poppedTx := popped.Value.(types.Tx)
+		// NOTE: the tx may have already been removed from the map
+		// but deleting a non-existent element is fine
+		delete(cache.map_, string(poppedTx))
+		cache.list.Remove(popped)
+	}
+	cache.map_[string(tx)] = struct{}{}
+	cache.list.PushBack(tx)
+	return true
+}
+
+func (cache *txCache) Remove(tx types.Tx) {
+	cache.mtx.Lock()
+	delete(cache.map_, string(tx))
+	cache.mtx.Unlock()
+}
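A hedged sketch of the cache's contract as implemented above: `Push` reports whether the tx was newly added, and the oldest entry is evicted FIFO once `size` is exceeded:

```
c := newTxCache(2)
c.Push(types.Tx("a"))   // true: newly cached
c.Push(types.Tx("a"))   // false: duplicate
c.Push(types.Tx("b"))   // true
c.Push(types.Tx("c"))   // true: cache is full, so "a" (the oldest) is evicted
c.Exists(types.Tx("a")) // false after eviction
c.Remove(types.Tx("b")) // removes "b" from the map; its list entry is left behind
```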
@@ -2,13 +2,12 @@ package mempool

import (
    "encoding/binary"
    "sync"
    "testing"

    "github.com/tendermint/tendermint/config/tendermint_test"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
-   tmspcli "github.com/tendermint/tmsp/client"
-   "github.com/tendermint/tmsp/example/counter"
+   "github.com/tendermint/abci/example/counter"
)

func TestSerialReap(t *testing.T) {
@@ -16,13 +15,13 @@ func TestSerialReap(t *testing.T) {

    app := counter.NewCounterApplication(true)
    app.SetOption("serial", "on")
-   mtx := new(sync.Mutex)
-   appConnMem := tmspcli.NewLocalClient(mtx, app)
-   appConnCon := tmspcli.NewLocalClient(mtx, app)
+   cc := proxy.NewLocalClientCreator(app)
+   appConnMem, _ := cc.NewABCIClient()
+   appConnCon, _ := cc.NewABCIClient()
    mempool := NewMempool(config, appConnMem)

-   appendTxsRange := func(start, end int) {
-       // Append some txs.
+   deliverTxsRange := func(start, end int) {
+       // Deliver some txs.
        for i := start; i < end; i++ {

            // This will succeed
@@ -62,17 +61,17 @@ func TestSerialReap(t *testing.T) {
    }

    commitRange := func(start, end int) {
-       // Append some txs.
+       // Deliver some txs.
        for i := start; i < end; i++ {
            txBytes := make([]byte, 8)
            binary.BigEndian.PutUint64(txBytes, uint64(i))
-           res := appConnCon.AppendTx(txBytes)
+           res := appConnCon.DeliverTxSync(txBytes)
            if !res.IsOK() {
                t.Errorf("Error committing tx. Code:%v result:%X log:%v",
                    res.Code, res.Data, res.Log)
            }
        }
-       res := appConnCon.Commit()
+       res := appConnCon.CommitSync()
        if len(res.Data) != 8 {
            t.Errorf("Error committing. Hash:%X log:%v", res.Data, res.Log)
        }
@@ -80,8 +79,8 @@ func TestSerialReap(t *testing.T) {

    //----------------------------------------

-   // Append some txs.
-   appendTxsRange(0, 100)
+   // Deliver some txs.
+   deliverTxsRange(0, 100)

    // Reap the txs.
    reapCheck(100)
@@ -89,9 +88,9 @@ func TestSerialReap(t *testing.T) {
    // Reap again. We should get the same amount
    reapCheck(100)

-   // Append 0 to 999, we should reap 900 new txs
+   // Deliver 0 to 999, we should reap 900 new txs
    // because 100 were already counted.
-   appendTxsRange(0, 1000)
+   deliverTxsRange(0, 1000)

    // Reap the txs.
    reapCheck(1000)
@@ -106,8 +105,8 @@ func TestSerialReap(t *testing.T) {
    // We should have 500 left.
    reapCheck(500)

-   // Append 100 invalid txs and 100 valid txs
-   appendTxsRange(900, 1100)
+   // Deliver 100 invalid txs and 100 valid txs
+   deliverTxsRange(900, 1100)

    // We should have 600 now.
    reapCheck(600)
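
The counter app in serial mode only accepts txs whose value increases, which is why the test encodes each tx as its 8-byte big-endian index. A standalone illustration of that encoding:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // Same encoding as the test: each tx is its 8-byte
    // big-endian counter value.
    tx := make([]byte, 8)
    binary.BigEndian.PutUint64(tx, 42)
    fmt.Printf("%X -> %d\n", tx, binary.BigEndian.Uint64(tx))
}
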
@@ -9,11 +9,10 @@ import (
    "github.com/tendermint/go-clist"
    . "github.com/tendermint/go-common"
    cfg "github.com/tendermint/go-config"
-   "github.com/tendermint/go-events"
    "github.com/tendermint/go-p2p"
    "github.com/tendermint/go-wire"
    "github.com/tendermint/tendermint/types"
-   tmsp "github.com/tendermint/tmsp/types"
+   abci "github.com/tendermint/abci/types"
)

const (
@@ -28,7 +27,7 @@ type MempoolReactor struct {
    p2p.BaseReactor
    config  cfg.Config
    Mempool *Mempool
-   evsw    *events.EventSwitch
+   evsw    types.EventSwitch
}

func NewMempoolReactor(config cfg.Config, mempool *Mempool) *MempoolReactor {
@@ -67,7 +66,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
        log.Warn("Error decoding message", "error", err)
        return
    }
-   log.Info("Receive", "src", src, "chId", chID, "msg", msg)
+   log.Debug("Receive", "src", src, "chId", chID, "msg", msg)

    switch msg := msg.(type) {
    case *TxMessage:
@@ -86,7 +85,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
    }

// Just an alias for CheckTx since broadcasting happens in peer routines
-func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*tmsp.Response)) error {
+func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error {
    return memR.Mempool.CheckTx(tx, cb)
}

@@ -110,7 +109,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {

    var next *clist.CElement
    for {
-       if !memR.IsRunning() {
+       if !memR.IsRunning() || !peer.IsRunning() {
            return // Quit!
        }
        if next == nil {
@@ -143,7 +142,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {
    }

// implements events.Eventable
-func (memR *MempoolReactor) SetEventSwitch(evsw *events.EventSwitch) {
+func (memR *MempoolReactor) SetEventSwitch(evsw types.EventSwitch) {
    memR.evsw = evsw
}

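
The one-line change in broadcastTxRoutine matters: without the peer.IsRunning() check, the per-peer goroutine could keep looping after the peer disconnected. A hedged sketch of the pattern with stand-in types (runner is hypothetical, not from the diff):

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// runner is a stand-in for the reactor/peer IsRunning() checks.
type runner struct{ on atomic.Bool }

func (r *runner) IsRunning() bool { return r.on.Load() }

// broadcastLoop mirrors the fixed condition: quit when either side stops.
func broadcastLoop(reactor, peer *runner, send func()) {
    for {
        if !reactor.IsRunning() || !peer.IsRunning() {
            return // Quit!
        }
        send()
        time.Sleep(10 * time.Millisecond)
    }
}

func main() {
    reactor, peer := &runner{}, &runner{}
    reactor.on.Store(true)
    peer.on.Store(true)
    go func() { time.Sleep(30 * time.Millisecond); peer.on.Store(false) }()
    broadcastLoop(reactor, peer, func() { fmt.Println("send tx") })
    fmt.Println("loop exited after the peer stopped")
}
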
107 node/node.go
@@ -12,7 +12,6 @@ import (
    cfg "github.com/tendermint/go-config"
    "github.com/tendermint/go-crypto"
    dbm "github.com/tendermint/go-db"
-   "github.com/tendermint/go-events"
    "github.com/tendermint/go-p2p"
    "github.com/tendermint/go-rpc"
    "github.com/tendermint/go-rpc/server"
@@ -22,6 +21,7 @@ import (
    mempl "github.com/tendermint/tendermint/mempool"
    "github.com/tendermint/tendermint/proxy"
    rpccore "github.com/tendermint/tendermint/rpc/core"
+   grpccore "github.com/tendermint/tendermint/rpc/grpc"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
    "github.com/tendermint/tendermint/version"
@@ -32,7 +32,7 @@ import _ "net/http/pprof"
type Node struct {
    config         cfg.Config
    sw             *p2p.Switch
-   evsw           *events.EventSwitch
+   evsw           types.EventSwitch
    blockStore     *bc.BlockStore
    bcReactor      *bc.BlockchainReactor
    mempoolReactor *mempl.MempoolReactor
@@ -53,8 +53,6 @@ func NewNodeDefault(config cfg.Config) *Node {

func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator) *Node {
-
    EnsureDir(config.GetString("db_dir"), 0700) // incase we use memdb, cswal still gets written here
-
    // Get BlockStore
    blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
    blockStore := bc.NewBlockStore(blockStoreDB)
@@ -63,11 +61,10 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
    stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))

    // Get State
-   state := getState(config, stateDB)
+   state := sm.GetState(config, stateDB)

-   // Create the proxyApp, which houses three connections:
-   // query, consensus, and mempool
-   proxyApp := proxy.NewAppConns(config, clientCreator, state, blockStore)
+   // Create the proxyApp, which manages connections (consensus, mempool, query)
+   proxyApp := proxy.NewAppConns(config, clientCreator, sm.NewHandshaker(config, state, blockStore))
    if _, err := proxyApp.Start(); err != nil {
        Exit(Fmt("Error starting proxy app connections: %v", err))
    }
@@ -80,7 +77,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
    privKey := crypto.GenPrivKeyEd25519()

    // Make event switch
-   eventSwitch := events.NewEventSwitch()
+   eventSwitch := types.NewEventSwitch()
    _, err := eventSwitch.Start()
    if err != nil {
        Exit(Fmt("Failed to start switch: %v", err))
@@ -97,7 +94,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
    }

    // Make BlockchainReactor
-   bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
+   bcReactor := bc.NewBlockchainReactor(config, state.Copy(), proxyApp.Consensus(), blockStore, fastSync)

    // Make MempoolReactor
    mempool := mempl.NewMempool(config, proxyApp.Mempool())
@@ -105,16 +102,10 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato

    // Make ConsensusReactor
    consensusState := consensus.NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
-   consensusReactor := consensus.NewConsensusReactor(consensusState, blockStore, fastSync)
    if privValidator != nil {
-       consensusReactor.SetPrivValidator(privValidator)
-   }
-
-   // deterministic accountability
-   err = consensusState.OpenWAL(config.GetString("cswal"))
-   if err != nil {
-       log.Error("Failed to open cswal", "error", err.Error())
+       consensusState.SetPrivValidator(privValidator)
    }
+   consensusReactor := consensus.NewConsensusReactor(consensusState, fastSync)

    // Make p2p network switch
    sw := p2p.NewSwitch(config.GetConfig("p2p"))
@@ -122,7 +113,16 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
    sw.AddReactor("BLOCKCHAIN", bcReactor)
    sw.AddReactor("CONSENSUS", consensusReactor)

-   // filter peers by addr or pubkey with a tmsp query.
+   // Optionally, start the pex reactor
+   // TODO: this is a dev feature, it needs some love
+   if config.GetBool("pex_reactor") {
+       addrBook := p2p.NewAddrBook(config.GetString("addrbook_file"), config.GetBool("addrbook_strict"))
+       addrBook.Start()
+       pexReactor := p2p.NewPEXReactor(addrBook)
+       sw.AddReactor("PEX", pexReactor)
+   }
+
+   // filter peers by addr or pubkey with a abci query.
    // if the query return code is OK, add peer
    // XXX: query format subject to change
    if config.GetBool("filter_peers") {
@@ -187,7 +187,7 @@ func (n *Node) Stop() {
    }

// Add the event switch to reactors, mempool, etc.
-func SetEventSwitch(evsw *events.EventSwitch, eventables ...events.Eventable) {
+func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) {
    for _, e := range eventables {
        e.SetEventSwitch(evsw)
    }
@@ -207,10 +207,9 @@ func (n *Node) StartRPC() ([]net.Listener, error) {
    rpccore.SetEventSwitch(n.evsw)
    rpccore.SetBlockStore(n.blockStore)
    rpccore.SetConsensusState(n.consensusState)
-   rpccore.SetConsensusReactor(n.consensusReactor)
-   rpccore.SetMempoolReactor(n.mempoolReactor)
+   rpccore.SetMempool(n.mempoolReactor.Mempool)
    rpccore.SetSwitch(n.sw)
-   rpccore.SetPrivValidator(n.privValidator)
+   rpccore.SetPubKey(n.privValidator.PubKey)
    rpccore.SetGenesisDoc(n.genesisDoc)
    rpccore.SetProxyAppQuery(n.proxyApp.Query())

@@ -229,6 +228,17 @@ func (n *Node) StartRPC() ([]net.Listener, error) {
        }
        listeners[i] = listener
    }

+   // we expose a simplified api over grpc for convenience to app devs
+   grpcListenAddr := n.config.GetString("grpc_laddr")
+   if grpcListenAddr != "" {
+       listener, err := grpccore.StartGRPCServer(grpcListenAddr)
+       if err != nil {
+           return nil, err
+       }
+       listeners = append(listeners, listener)
+   }
+
    return listeners, nil
}

@@ -252,7 +262,7 @@ func (n *Node) MempoolReactor() *mempl.MempoolReactor {
    return n.mempoolReactor
}

-func (n *Node) EventSwitch() *events.EventSwitch {
+func (n *Node) EventSwitch() types.EventSwitch {
    return n.evsw
}

@@ -261,6 +271,14 @@ func (n *Node) PrivValidator() *types.PrivValidator {
    return n.privValidator
}

+func (n *Node) GenesisDoc() *types.GenesisDoc {
+   return n.genesisDoc
+}
+
+func (n *Node) ProxyApp() proxy.AppConns {
+   return n.proxyApp
+}
+
func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd25519) *p2p.NodeInfo {

    nodeInfo := &p2p.NodeInfo{
@@ -298,22 +316,11 @@ func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd255
    return nodeInfo
}

-// Load the most recent state from "state" db,
-// or create a new one (and save) from genesis.
-func getState(config cfg.Config, stateDB dbm.DB) *sm.State {
-   state := sm.LoadState(stateDB)
-   if state == nil {
-       state = sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
-       state.Save()
-   }
-   return state
-}
-
//------------------------------------------------------------------------------

// Users wishing to:
// * use an external signer for their validators
-// * supply an in-proc tmsp app
+// * supply an in-proc abci app
// should fork tendermint/tendermint and implement RunNode to
// call NewNode with their custom priv validator and/or custom
// proxy.ClientCreator interface
@@ -393,17 +400,19 @@ func newConsensusState(config cfg.Config) *consensus.ConsensusState {
    stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
    state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))

-   // Create two proxyAppConn connections,
-   // one for the consensus and one for the mempool.
-   proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), state, blockStore)
+   // Create proxyAppConn connection (consensus, mempool, query)
+   proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), sm.NewHandshaker(config, state, blockStore))
    _, err := proxyApp.Start()
    if err != nil {
        Exit(Fmt("Error starting proxy app conns: %v", err))
    }

+   // add the chainid to the global config
+   config.Set("chain_id", state.ChainID)
+
    // Make event switch
-   eventSwitch := events.NewEventSwitch()
-   _, err := eventSwitch.Start()
-   if err != nil {
+   eventSwitch := types.NewEventSwitch()
+   if _, err := eventSwitch.Start(); err != nil {
        Exit(Fmt("Failed to start event switch: %v", err))
    }

@@ -414,12 +423,7 @@ func newConsensusState(config cfg.Config) *consensus.ConsensusState {
    return consensusState
}

-func RunReplayConsole(config cfg.Config) {
-   walFile := config.GetString("cswal")
-   if walFile == "" {
-       Exit("cswal file name not set in tendermint config")
-   }
-
+func RunReplayConsole(config cfg.Config, walFile string) {
    consensusState := newConsensusState(config)

    if err := consensusState.ReplayConsole(walFile); err != nil {
@@ -427,12 +431,7 @@ func RunReplayConsole(config cfg.Config) {
    }
}

-func RunReplay(config cfg.Config) {
-   walFile := config.GetString("cswal")
-   if walFile == "" {
-       Exit("cswal file name not set in tendermint config")
-   }
-
+func RunReplay(config cfg.Config, walFile string) {
    consensusState := newConsensusState(config)

    if err := consensusState.ReplayMessages(walFile); err != nil {
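
node.SetEventSwitch above fans a single switch out to every eventable reactor. A minimal mirror with stand-in types (eventSwitch and reactor here are illustrative, not the real tendermint/types interfaces):

package main

import "fmt"

type eventSwitch struct{ name string }

type eventable interface{ SetEventSwitch(evsw *eventSwitch) }

type reactor struct{ name string }

func (r *reactor) SetEventSwitch(evsw *eventSwitch) {
    fmt.Printf("%s wired to %s\n", r.name, evsw.name)
}

// setEventSwitch mirrors node.SetEventSwitch: one switch, many eventables.
func setEventSwitch(evsw *eventSwitch, eventables ...eventable) {
    for _, e := range eventables {
        e.SetEventSwitch(evsw)
    }
}

func main() {
    setEventSwitch(&eventSwitch{"main-switch"}, &reactor{"MEMPOOL"}, &reactor{"CONSENSUS"})
}
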
@@ -1,32 +1,32 @@
package proxy

import (
-   tmspcli "github.com/tendermint/tmsp/client"
-   "github.com/tendermint/tmsp/types"
+   abcicli "github.com/tendermint/abci/client"
+   "github.com/tendermint/abci/types"
)

//----------------------------------------------------------------------------------------
-// Enforce which tmsp msgs can be sent on a connection at the type level
+// Enforce which abci msgs can be sent on a connection at the type level

type AppConnConsensus interface {
-   SetResponseCallback(tmspcli.Callback)
+   SetResponseCallback(abcicli.Callback)
    Error() error

    InitChainSync(validators []*types.Validator) (err error)

-   BeginBlockSync(height uint64) (err error)
-   AppendTxAsync(tx []byte) *tmspcli.ReqRes
-   EndBlockSync(height uint64) (changedValidators []*types.Validator, err error)
+   BeginBlockSync(hash []byte, header *types.Header) (err error)
+   DeliverTxAsync(tx []byte) *abcicli.ReqRes
+   EndBlockSync(height uint64) (types.ResponseEndBlock, error)
    CommitSync() (res types.Result)
}

type AppConnMempool interface {
-   SetResponseCallback(tmspcli.Callback)
+   SetResponseCallback(abcicli.Callback)
    Error() error

-   CheckTxAsync(tx []byte) *tmspcli.ReqRes
+   CheckTxAsync(tx []byte) *abcicli.ReqRes

-   FlushAsync() *tmspcli.ReqRes
+   FlushAsync() *abcicli.ReqRes
    FlushSync() error
}

@@ -34,42 +34,46 @@ type AppConnQuery interface {
    Error() error

    EchoSync(string) (res types.Result)
-   InfoSync() (res types.Result)
+   InfoSync() (types.ResponseInfo, error)
    QuerySync(tx []byte) (res types.Result)

    // SetOptionSync(key string, value string) (res types.Result)
}

//-----------------------------------------------------------------------------------------
-// Implements AppConnConsensus (subset of tmspcli.Client)
+// Implements AppConnConsensus (subset of abcicli.Client)

type appConnConsensus struct {
-   appConn tmspcli.Client
+   appConn abcicli.Client
}

-func NewAppConnConsensus(appConn tmspcli.Client) *appConnConsensus {
+func NewAppConnConsensus(appConn abcicli.Client) *appConnConsensus {
    return &appConnConsensus{
        appConn: appConn,
    }
}

-func (app *appConnConsensus) SetResponseCallback(cb tmspcli.Callback) {
+func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) {
    app.appConn.SetResponseCallback(cb)
}

func (app *appConnConsensus) Error() error {
    return app.appConn.Error()
}

func (app *appConnConsensus) InitChainSync(validators []*types.Validator) (err error) {
    return app.appConn.InitChainSync(validators)
}
-func (app *appConnConsensus) BeginBlockSync(height uint64) (err error) {
-   return app.appConn.BeginBlockSync(height)
-}
-func (app *appConnConsensus) AppendTxAsync(tx []byte) *tmspcli.ReqRes {
-   return app.appConn.AppendTxAsync(tx)
+
+func (app *appConnConsensus) BeginBlockSync(hash []byte, header *types.Header) (err error) {
+   return app.appConn.BeginBlockSync(hash, header)
}

-func (app *appConnConsensus) EndBlockSync(height uint64) (changedValidators []*types.Validator, err error) {
+func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes {
+   return app.appConn.DeliverTxAsync(tx)
+}
+
+func (app *appConnConsensus) EndBlockSync(height uint64) (types.ResponseEndBlock, error) {
    return app.appConn.EndBlockSync(height)
}

@@ -78,19 +82,19 @@ func (app *appConnConsensus) CommitSync() (res types.Result) {
}

//------------------------------------------------
-// Implements AppConnMempool (subset of tmspcli.Client)
+// Implements AppConnMempool (subset of abcicli.Client)

type appConnMempool struct {
-   appConn tmspcli.Client
+   appConn abcicli.Client
}

-func NewAppConnMempool(appConn tmspcli.Client) *appConnMempool {
+func NewAppConnMempool(appConn abcicli.Client) *appConnMempool {
    return &appConnMempool{
        appConn: appConn,
    }
}

-func (app *appConnMempool) SetResponseCallback(cb tmspcli.Callback) {
+func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) {
    app.appConn.SetResponseCallback(cb)
}

@@ -98,7 +102,7 @@ func (app *appConnMempool) Error() error {
    return app.appConn.Error()
}

-func (app *appConnMempool) FlushAsync() *tmspcli.ReqRes {
+func (app *appConnMempool) FlushAsync() *abcicli.ReqRes {
    return app.appConn.FlushAsync()
}

@@ -106,18 +110,18 @@ func (app *appConnMempool) FlushSync() error {
    return app.appConn.FlushSync()
}

-func (app *appConnMempool) CheckTxAsync(tx []byte) *tmspcli.ReqRes {
+func (app *appConnMempool) CheckTxAsync(tx []byte) *abcicli.ReqRes {
    return app.appConn.CheckTxAsync(tx)
}

//------------------------------------------------
-// Implements AppConnQuery (subset of tmspcli.Client)
+// Implements AppConnQuery (subset of abcicli.Client)

type appConnQuery struct {
-   appConn tmspcli.Client
+   appConn abcicli.Client
}

-func NewAppConnQuery(appConn tmspcli.Client) *appConnQuery {
+func NewAppConnQuery(appConn abcicli.Client) *appConnQuery {
    return &appConnQuery{
        appConn: appConn,
    }
@@ -131,7 +135,7 @@ func (app *appConnQuery) EchoSync(msg string) (res types.Result) {
    return app.appConn.EchoSync(msg)
}

-func (app *appConnQuery) InfoSync() (res types.Result) {
+func (app *appConnQuery) InfoSync() (types.ResponseInfo, error) {
    return app.appConn.InfoSync()
}

@@ -5,29 +5,29 @@ import (
    "testing"

    . "github.com/tendermint/go-common"
-   tmspcli "github.com/tendermint/tmsp/client"
-   "github.com/tendermint/tmsp/example/dummy"
-   "github.com/tendermint/tmsp/server"
-   "github.com/tendermint/tmsp/types"
+   abcicli "github.com/tendermint/abci/client"
+   "github.com/tendermint/abci/example/dummy"
+   "github.com/tendermint/abci/server"
+   "github.com/tendermint/abci/types"
)

//----------------------------------------

type AppConnTest interface {
-   EchoAsync(string) *tmspcli.ReqRes
+   EchoAsync(string) *abcicli.ReqRes
    FlushSync() error
-   InfoSync() (res types.Result)
+   InfoSync() (types.ResponseInfo, error)
}

type appConnTest struct {
-   appConn tmspcli.Client
+   appConn abcicli.Client
}

-func NewAppConnTest(appConn tmspcli.Client) AppConnTest {
+func NewAppConnTest(appConn abcicli.Client) AppConnTest {
    return &appConnTest{appConn}
}

-func (app *appConnTest) EchoAsync(msg string) *tmspcli.ReqRes {
+func (app *appConnTest) EchoAsync(msg string) *abcicli.ReqRes {
    return app.appConn.EchoAsync(msg)
}

@@ -35,7 +35,7 @@ func (app *appConnTest) FlushSync() error {
    return app.appConn.FlushSync()
}

-func (app *appConnTest) InfoSync() types.Result {
+func (app *appConnTest) InfoSync() (types.ResponseInfo, error) {
    return app.appConn.InfoSync()
}

@@ -54,7 +54,7 @@ func TestEcho(t *testing.T) {
    }
    defer s.Stop()
    // Start client
-   cli, err := clientCreator.NewTMSPClient()
+   cli, err := clientCreator.NewABCIClient()
    if err != nil {
        Exit(err.Error())
    }
@@ -78,7 +78,7 @@ func BenchmarkEcho(b *testing.B) {
    }
    defer s.Stop()
    // Start client
-   cli, err := clientCreator.NewTMSPClient()
+   cli, err := clientCreator.NewABCIClient()
    if err != nil {
        Exit(err.Error())
    }
@@ -107,18 +107,18 @@ func TestInfo(t *testing.T) {
    }
    defer s.Stop()
    // Start client
-   cli, err := clientCreator.NewTMSPClient()
+   cli, err := clientCreator.NewABCIClient()
    if err != nil {
        Exit(err.Error())
    }
    proxy := NewAppConnTest(cli)
    t.Log("Connected")

-   res := proxy.InfoSync()
-   if res.IsErr() {
+   resInfo, err := proxy.InfoSync()
+   if err != nil {
        t.Errorf("Unexpected error: %v", err)
    }
-   if string(res.Data) != "size:0" {
-       t.Error("Expected ResponseInfo with one element 'size:0' but got something else")
+   if string(resInfo.Data) != "{\"size\":0}" {
+       t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else")
    }
}
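
The TestInfo expectation changed because the dummy app now serializes its state as JSON. A quick standalone check of the expected string:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // The Data string moved from "size:0" to a JSON object.
    data, err := json.Marshal(map[string]int{"size": 0})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data))                 // {"size":0}
    fmt.Println(string(data) == `{"size":0}`) // true
}
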
@@ -5,15 +5,15 @@ import (
    "sync"

    cfg "github.com/tendermint/go-config"
-   tmspcli "github.com/tendermint/tmsp/client"
-   "github.com/tendermint/tmsp/example/dummy"
-   nilapp "github.com/tendermint/tmsp/example/nil"
-   "github.com/tendermint/tmsp/types"
+   abcicli "github.com/tendermint/abci/client"
+   "github.com/tendermint/abci/example/dummy"
+   nilapp "github.com/tendermint/abci/example/nil"
+   "github.com/tendermint/abci/types"
)

-// NewTMSPClient returns newly connected client
+// NewABCIClient returns newly connected client
type ClientCreator interface {
-   NewTMSPClient() (tmspcli.Client, error)
+   NewABCIClient() (abcicli.Client, error)
}

//----------------------------------------------------
@@ -31,8 +31,8 @@ func NewLocalClientCreator(app types.Application) ClientCreator {
    }
}

-func (l *localClientCreator) NewTMSPClient() (tmspcli.Client, error) {
-   return tmspcli.NewLocalClient(l.mtx, l.app), nil
+func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
+   return abcicli.NewLocalClient(l.mtx, l.app), nil
}

//---------------------------------------------------------------
@@ -52,9 +52,9 @@ func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCrea
    }
}

-func (r *remoteClientCreator) NewTMSPClient() (tmspcli.Client, error) {
+func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
    // Run forever in a loop
-   remoteApp, err := tmspcli.NewClient(r.addr, r.transport, r.mustConnect)
+   remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect)
    if err != nil {
        return nil, fmt.Errorf("Failed to connect to proxy: %v", err)
    }
@@ -66,11 +66,13 @@ func (r *remoteClientCreator) NewTMSPClient() (tmspcli.Client, error) {

func DefaultClientCreator(config cfg.Config) ClientCreator {
    addr := config.GetString("proxy_app")
-   transport := config.GetString("tmsp")
+   transport := config.GetString("abci")

    switch addr {
    case "dummy":
        return NewLocalClientCreator(dummy.NewDummyApplication())
+   case "persistent_dummy":
+       return NewLocalClientCreator(dummy.NewPersistentDummyApplication(config.GetString("db_dir")))
    case "nilapp":
        return NewLocalClientCreator(nilapp.NewNilApplication())
    default:
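
DefaultClientCreator maps a few well-known proxy_app names to in-process example apps and treats anything else as a remote address to dial. A toy version of that dispatch (pickCreator is hypothetical):

package main

import "fmt"

// pickCreator mirrors the switch in DefaultClientCreator: known names
// get a local in-process client, everything else a remote client.
func pickCreator(addr string) string {
    switch addr {
    case "dummy", "persistent_dummy", "nilapp":
        return "local client for " + addr
    default:
        return "remote client dialing " + addr
    }
}

func main() {
    fmt.Println(pickCreator("dummy"))
    fmt.Println(pickCreator("tcp://127.0.0.1:46658"))
}
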
@@ -5,6 +5,8 @@ import (
    cfg "github.com/tendermint/go-config"
)

//-----------------------------

+// Tendermint's interface to the application consists of multiple connections
type AppConns interface {
    Service
@@ -14,19 +16,27 @@ type AppConns interface {
    Query() AppConnQuery
}

-func NewAppConns(config cfg.Config, clientCreator ClientCreator, state State, blockStore BlockStore) AppConns {
-   return NewMultiAppConn(config, clientCreator, state, blockStore)
+func NewAppConns(config cfg.Config, clientCreator ClientCreator, handshaker Handshaker) AppConns {
+   return NewMultiAppConn(config, clientCreator, handshaker)
}

//-----------------------------
// multiAppConn implements AppConns

+type Handshaker interface {
+   Handshake(AppConns) error
+}
+
// a multiAppConn is made of a few appConns (mempool, consensus, query)
-// and manages their underlying tmsp clients, ensuring they reboot together
+// and manages their underlying abci clients, including the handshake
+// which ensures the app and tendermint are synced.
+// TODO: on app restart, clients must reboot together
type multiAppConn struct {
-   QuitService
+   BaseService

    config cfg.Config

-   state      State
-   blockStore BlockStore
+   handshaker Handshaker

    mempoolConn   *appConnMempool
    consensusConn *appConnConsensus
@@ -35,15 +45,14 @@ type multiAppConn struct {
    clientCreator ClientCreator
}

-// Make all necessary tmsp connections to the application
-func NewMultiAppConn(config cfg.Config, clientCreator ClientCreator, state State, blockStore BlockStore) *multiAppConn {
+// Make all necessary abci connections to the application
+func NewMultiAppConn(config cfg.Config, clientCreator ClientCreator, handshaker Handshaker) *multiAppConn {
    multiAppConn := &multiAppConn{
        config:        config,
-       state:         state,
-       blockStore:    blockStore,
+       handshaker:    handshaker,
        clientCreator: clientCreator,
    }
-   multiAppConn.QuitService = *NewQuitService(log, "multiAppConn", multiAppConn)
+   multiAppConn.BaseService = *NewBaseService(log, "multiAppConn", multiAppConn)
    return multiAppConn
}

@@ -57,39 +66,38 @@ func (app *multiAppConn) Consensus() AppConnConsensus {
    return app.consensusConn
}

// Returns the query Connection
func (app *multiAppConn) Query() AppConnQuery {
    return app.queryConn
}

func (app *multiAppConn) OnStart() error {
-   app.QuitService.OnStart()
+   app.BaseService.OnStart()

    // query connection
-   querycli, err := app.clientCreator.NewTMSPClient()
+   querycli, err := app.clientCreator.NewABCIClient()
    if err != nil {
        return err
    }
    app.queryConn = NewAppConnQuery(querycli)

    // mempool connection
-   memcli, err := app.clientCreator.NewTMSPClient()
+   memcli, err := app.clientCreator.NewABCIClient()
    if err != nil {
        return err
    }
    app.mempoolConn = NewAppConnMempool(memcli)

    // consensus connection
-   concli, err := app.clientCreator.NewTMSPClient()
+   concli, err := app.clientCreator.NewABCIClient()
    if err != nil {
        return err
    }
    app.consensusConn = NewAppConnConsensus(concli)

-   // TODO: handshake
-
-   // TODO: replay blocks
-
-   // TODO: (on restart) replay mempool
+   // ensure app is synced to the latest state
+   if app.handshaker != nil {
+       return app.handshaker.Handshake(app)
+   }
+   return nil
}

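
The new Handshaker hook replaces the old TODOs: OnStart builds the three connections and then lets the handshaker sync the app before the node proceeds. A trimmed, runnable mirror of that flow (replayHandshaker is illustrative, not the real sm.NewHandshaker):

package main

import "fmt"

// Reduced mirrors of the proxy types above.
type AppConns interface{ OnStart() error }

type Handshaker interface{ Handshake(AppConns) error }

type multiAppConn struct{ handshaker Handshaker }

func (app *multiAppConn) OnStart() error {
    // ...create the query, mempool and consensus clients here...
    // ensure app is synced to the latest state
    if app.handshaker != nil {
        return app.handshaker.Handshake(app)
    }
    return nil
}

type replayHandshaker struct{}

func (replayHandshaker) Handshake(AppConns) error {
    fmt.Println("comparing app state with the block store, replaying as needed")
    return nil
}

func main() {
    conns := &multiAppConn{handshaker: replayHandshaker{}}
    if err := conns.OnStart(); err != nil {
        fmt.Println("handshake failed:", err)
    }
}
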
@@ -1,9 +0,0 @@
-package proxy
-
-type State interface {
-   // TODO
-}
-
-type BlockStore interface {
-   // TODO
-}
25 rpc/core/abci.go (new file)
@@ -0,0 +1,25 @@
+package core
+
+import (
+   ctypes "github.com/tendermint/tendermint/rpc/core/types"
+)
+
+//-----------------------------------------------------------------------------
+
+func ABCIQuery(query []byte) (*ctypes.ResultABCIQuery, error) {
+   res := proxyAppQuery.QuerySync(query)
+   return &ctypes.ResultABCIQuery{res}, nil
+}
+
+func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
+   res, err := proxyAppQuery.InfoSync()
+   if err != nil {
+       return nil, err
+   }
+   return &ctypes.ResultABCIInfo{
+       Data:             res.Data,
+       Version:          res.Version,
+       LastBlockHeight:  res.LastBlockHeight,
+       LastBlockAppHash: res.LastBlockAppHash,
+   }, nil
+}
@@ -9,6 +9,7 @@ import (

//-----------------------------------------------------------------------------

+// TODO: limit/permission on (max - min)
func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
    if maxHeight == 0 {
        maxHeight = blockStore.Height()
@@ -18,7 +19,7 @@ func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, err
    if minHeight == 0 {
        minHeight = MaxInt(1, maxHeight-20)
    }
-   log.Info("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight)
+   log.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight)

    blockMetas := []*types.BlockMeta{}
    for height := maxHeight; height >= minHeight; height-- {
@@ -8,18 +8,7 @@ import (
)

func Validators() (*ctypes.ResultValidators, error) {
-   var blockHeight int
-   var validators []*types.Validator
-
-   // XXX: this is racy.
-   // Either use state.LoadState(db) or make state atomic (see #165)
-   state := consensusState.GetState()
-   blockHeight = state.LastBlockHeight
-   state.Validators.Iterate(func(index int, val *types.Validator) bool {
-       validators = append(validators, val)
-       return false
-   })
-
+   blockHeight, validators := consensusState.GetValidators()
    return &ctypes.ResultValidators{blockHeight, validators}, nil
}

@@ -10,7 +10,7 @@ import (
)

func UnsafeFlushMempool() (*ctypes.ResultUnsafeFlushMempool, error) {
-   mempoolReactor.Mempool.Flush()
+   mempool.Flush()
    return &ctypes.ResultUnsafeFlushMempool{}, nil
}

@@ -1,7 +1,6 @@
package core

import (
-   "github.com/tendermint/go-events"
    "github.com/tendermint/go-rpc/types"
    ctypes "github.com/tendermint/tendermint/rpc/core/types"
    "github.com/tendermint/tendermint/types"
@@ -9,10 +8,10 @@ import (

func Subscribe(wsCtx rpctypes.WSRPCContext, event string) (*ctypes.ResultSubscribe, error) {
    log.Notice("Subscribe to event", "remote", wsCtx.GetRemoteAddr(), "event", event)
-   wsCtx.GetEventSwitch().AddListenerForEvent(wsCtx.GetRemoteAddr(), event, func(msg events.EventData) {
+   types.AddListenerForEvent(wsCtx.GetEventSwitch(), wsCtx.GetRemoteAddr(), event, func(msg types.TMEventData) {
        // NOTE: EventSwitch callbacks must be nonblocking
        // NOTE: RPCResponses of subscribed events have id suffix "#event"
-       tmResult := ctypes.TMResult(&ctypes.ResultEvent{event, types.TMEventData(msg)})
+       tmResult := ctypes.TMResult(&ctypes.ResultEvent{event, msg})
        wsCtx.TryWriteRPCResponse(rpctypes.NewRPCResponse(wsCtx.Request.ID+"#event", &tmResult, ""))
    })
    return &ctypes.ResultSubscribe{}, nil
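
The "callbacks must be nonblocking" note is typically honored with a buffered channel plus a non-blocking send, which is what TryWriteRPCResponse suggests. A sketch of that idiom (tryWrite is hypothetical):

package main

import "fmt"

// tryWrite drops the message instead of blocking when the
// subscriber's buffer is full.
func tryWrite(ch chan string, msg string) bool {
    select {
    case ch <- msg:
        return true
    default:
        return false
    }
}

func main() {
    ch := make(chan string, 1)
    fmt.Println(tryWrite(ch, "event#1")) // true
    fmt.Println(tryWrite(ch, "event#2")) // false: buffer full, dropped
}
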
@@ -4,10 +4,9 @@ import (
    "fmt"
    "time"

-   "github.com/tendermint/go-events"
    ctypes "github.com/tendermint/tendermint/rpc/core/types"
    "github.com/tendermint/tendermint/types"
-   tmsp "github.com/tendermint/tmsp/types"
+   abci "github.com/tendermint/abci/types"
)

//-----------------------------------------------------------------------------
@@ -15,7 +14,7 @@ import (

// Returns right away, with no response
func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-   err := mempoolReactor.BroadcastTx(tx, nil)
+   err := mempool.CheckTx(tx, nil)
    if err != nil {
        return nil, fmt.Errorf("Error broadcasting transaction: %v", err)
    }
@@ -24,8 +23,8 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {

// Returns with the response from CheckTx
func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-   resCh := make(chan *tmsp.Response, 1)
-   err := mempoolReactor.BroadcastTx(tx, func(res *tmsp.Response) {
+   resCh := make(chan *abci.Response, 1)
+   err := mempool.CheckTx(tx, func(res *abci.Response) {
        resCh <- res
    })
    if err != nil {
@@ -40,62 +39,59 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
    }, nil
}

-// CONTRACT: returns error==nil iff the tx is included in a block.
-//
-// If CheckTx fails, return with the response from CheckTx AND an error.
-// Else, block until the tx is included in a block,
-// and return the result of AppendTx (with no error).
-// Even if AppendTx fails, so long as the tx is included in a block this function
-// will not return an error - it is the caller's responsibility to check res.Code.
-// The function times out after five minutes and returns the result of CheckTx and an error.
-// TODO: smarter timeout logic or someway to cancel (tx not getting committed is a sign of a larger problem!)
-func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+// CONTRACT: only returns error if mempool.BroadcastTx errs (ie. problem with the app)
+// or if we timeout waiting for tx to commit.
+// If CheckTx or DeliverTx fail, no error will be returned, but the returned result
+// will contain a non-OK ABCI code.
+func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {

    // subscribe to tx being committed in block
-   appendTxResCh := make(chan *tmsp.Response, 1)
-   eventSwitch.AddListenerForEvent("rpc", types.EventStringTx(tx), func(data events.EventData) {
-       appendTxResCh <- data.(*tmsp.Response)
+   deliverTxResCh := make(chan types.EventDataTx, 1)
+   types.AddListenerForEvent(eventSwitch, "rpc", types.EventStringTx(tx), func(data types.TMEventData) {
+       deliverTxResCh <- data.(types.EventDataTx)
    })

    // broadcast the tx and register checktx callback
-   checkTxResCh := make(chan *tmsp.Response, 1)
-   err := mempoolReactor.BroadcastTx(tx, func(res *tmsp.Response) {
+   checkTxResCh := make(chan *abci.Response, 1)
+   err := mempool.CheckTx(tx, func(res *abci.Response) {
        checkTxResCh <- res
    })
    if err != nil {
        log.Error("err", "err", err)
        return nil, fmt.Errorf("Error broadcasting transaction: %v", err)
    }
    checkTxRes := <-checkTxResCh
    checkTxR := checkTxRes.GetCheckTx()
-   if r := checkTxR; r.Code != tmsp.CodeType_OK {
+   if checkTxR.Code != abci.CodeType_OK {
        // CheckTx failed!
-       return &ctypes.ResultBroadcastTx{
-           Code: r.Code,
-           Data: r.Data,
-           Log:  r.Log,
-       }, fmt.Errorf("Check tx failed with non-zero code: %s. Data: %X; Log: %s", r.Code.String(), r.Data, r.Log)
+       return &ctypes.ResultBroadcastTxCommit{
+           CheckTx:   checkTxR,
+           DeliverTx: nil,
+       }, nil
    }

    // Wait for the tx to be included in a block,
    // timeout after something reasonable.
-   timer := time.NewTimer(60 * 5 * time.Second)
+   // TODO: configureable?
+   timer := time.NewTimer(60 * 2 * time.Second)
    select {
-   case appendTxRes := <-appendTxResCh:
+   case deliverTxRes := <-deliverTxResCh:
        // The tx was included in a block.
        // NOTE we don't return an error regardless of the AppendTx code;
        // clients must check this to see if they need to send a new tx!
-       r := appendTxRes.GetAppendTx()
-       return &ctypes.ResultBroadcastTx{
-           Code: r.Code,
-           Data: r.Data,
-           Log:  r.Log,
+       deliverTxR := &abci.ResponseDeliverTx{
+           Code: deliverTxRes.Code,
+           Data: deliverTxRes.Data,
+           Log:  deliverTxRes.Log,
        }
+       log.Notice("DeliverTx passed ", "tx", []byte(tx), "response", deliverTxR)
+       return &ctypes.ResultBroadcastTxCommit{
+           CheckTx:   checkTxR,
+           DeliverTx: deliverTxR,
+       }, nil
    case <-timer.C:
-       r := checkTxR
-       return &ctypes.ResultBroadcastTx{
-           Code: r.Code,
-           Data: r.Data,
-           Log:  r.Log,
+       log.Error("failed to include tx")
+       return &ctypes.ResultBroadcastTxCommit{
+           CheckTx:   checkTxR,
+           DeliverTx: nil,
        }, fmt.Errorf("Timed out waiting for transaction to be included in a block")
    }

@@ -103,10 +99,10 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
}

func UnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
-   txs := mempoolReactor.Mempool.Reap(-1)
+   txs := mempool.Reap(-1)
    return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil
}

func NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
-   return &ctypes.ResultUnconfirmedTxs{N: mempoolReactor.Mempool.Size()}, nil
+   return &ctypes.ResultUnconfirmedTxs{N: mempool.Size()}, nil
}

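
The core of BroadcastTxCommit is a select between the DeliverTx event channel and a timer (now 2 minutes instead of 5). A reduced sketch of that wait (waitForCommit is illustrative, not the real function):

package main

import (
    "fmt"
    "time"
)

// waitForCommit blocks on the DeliverTx event channel, but gives up
// after the deadline, mirroring the timer branch above.
func waitForCommit(deliverTxCh <-chan string, timeout time.Duration) (string, error) {
    timer := time.NewTimer(timeout)
    defer timer.Stop()
    select {
    case res := <-deliverTxCh:
        return res, nil
    case <-timer.C:
        return "", fmt.Errorf("timed out waiting for transaction to be included in a block")
    }
}

func main() {
    ch := make(chan string, 1)
    go func() { time.Sleep(20 * time.Millisecond); ch <- "deliver_tx ok" }()
    res, err := waitForCommit(ch, time.Second)
    fmt.Println(res, err)
}
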
@@ -1,8 +1,6 @@
package core

import (
-   "fmt"
-
    ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

@@ -31,12 +29,8 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {

//-----------------------------------------------------------------------------

-// Dial given list of seeds if we have no outbound peers
-func DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
-   outbound, _, _ := p2pSwitch.NumPeers()
-   if outbound != 0 {
-       return nil, fmt.Errorf("Already have some outbound peers")
-   }
+// Dial given list of seeds
+func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
+   // starts go routines to dial each seed after random delays
    p2pSwitch.DialSeeds(seeds)
    return &ctypes.ResultDialSeeds{}, nil
@@ -2,58 +2,89 @@ package core

import (
    cfg "github.com/tendermint/go-config"
+   "github.com/tendermint/go-crypto"
    "github.com/tendermint/go-p2p"

-   "github.com/tendermint/go-events"
-   bc "github.com/tendermint/tendermint/blockchain"
    "github.com/tendermint/tendermint/consensus"
-   mempl "github.com/tendermint/tendermint/mempool"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
+   abci "github.com/tendermint/abci/types"
)

-var eventSwitch *events.EventSwitch
-var blockStore *bc.BlockStore
-var consensusState *consensus.ConsensusState
-var consensusReactor *consensus.ConsensusReactor
-var mempoolReactor *mempl.MempoolReactor
-var p2pSwitch *p2p.Switch
-var privValidator *types.PrivValidator
-var genDoc *types.GenesisDoc // cache the genesis structure
-var proxyAppQuery proxy.AppConnQuery
+//-----------------------------------------------------
+// Interfaces for use by RPC
+// NOTE: these methods must be thread safe!

-var config cfg.Config = nil
+type BlockStore interface {
+   Height() int
+   LoadBlockMeta(height int) *types.BlockMeta
+   LoadBlock(height int) *types.Block
+}
+
+type Consensus interface {
+   GetValidators() (int, []*types.Validator)
+   GetRoundState() *consensus.RoundState
+}
+
+type Mempool interface {
+   Size() int
+   CheckTx(types.Tx, func(*abci.Response)) error
+   Reap(int) []types.Tx
+   Flush()
+}
+
+type P2P interface {
+   Listeners() []p2p.Listener
+   Peers() p2p.IPeerSet
+   NumPeers() (outbound, inbound, dialig int)
+   NodeInfo() *p2p.NodeInfo
+   IsListening() bool
+   DialSeeds([]string)
+}
+
+var (
+   // external, thread safe interfaces
+   eventSwitch   types.EventSwitch
+   proxyAppQuery proxy.AppConnQuery
+   config        cfg.Config
+
+   // interfaces defined above
+   blockStore     BlockStore
+   consensusState Consensus
+   mempool        Mempool
+   p2pSwitch      P2P
+
+   // objects
+   pubKey crypto.PubKey
+   genDoc *types.GenesisDoc // cache the genesis structure
+)

func SetConfig(c cfg.Config) {
    config = c
}

-func SetEventSwitch(evsw *events.EventSwitch) {
+func SetEventSwitch(evsw types.EventSwitch) {
    eventSwitch = evsw
}

-func SetBlockStore(bs *bc.BlockStore) {
+func SetBlockStore(bs BlockStore) {
    blockStore = bs
}

-func SetConsensusState(cs *consensus.ConsensusState) {
+func SetConsensusState(cs Consensus) {
    consensusState = cs
}

-func SetConsensusReactor(cr *consensus.ConsensusReactor) {
-   consensusReactor = cr
+func SetMempool(mem Mempool) {
+   mempool = mem
}

-func SetMempoolReactor(mr *mempl.MempoolReactor) {
-   mempoolReactor = mr
-}
-
-func SetSwitch(sw *p2p.Switch) {
+func SetSwitch(sw P2P) {
    p2pSwitch = sw
}

-func SetPrivValidator(pv *types.PrivValidator) {
-   privValidator = pv
+func SetPubKey(pk crypto.PubKey) {
+   pubKey = pk
}

func SetGenesisDoc(doc *types.GenesisDoc) {

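
pipe.go now depends on small locally-defined interfaces instead of concrete reactor types, so the RPC layer sees only the thread-safe subset it needs. Go satisfies such interfaces implicitly; a minimal mirror of the Mempool case (realMempool is hypothetical):

package main

import "fmt"

// Narrow consumer-side interface, as in the new pipe.go.
type Mempool interface {
    Size() int
}

// A concrete mempool satisfies it without declaring anything.
type realMempool struct{ txs []string }

func (m *realMempool) Size() int { return len(m.txs) }

var mempool Mempool

func SetMempool(mem Mempool) { mempool = mem }

func main() {
    SetMempool(&realMempool{txs: []string{"tx1", "tx2"}})
    fmt.Println("unconfirmed txs:", mempool.Size())
}
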
@@ -6,30 +6,38 @@ import (
    ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

+// TODO: better system than "unsafe" prefix
var Routes = map[string]*rpc.RPCFunc{
+   // subscribe/unsubscribe are reserved for websocket events.
    "subscribe":   rpc.NewWSRPCFunc(SubscribeResult, "event"),
    "unsubscribe": rpc.NewWSRPCFunc(UnsubscribeResult, "event"),

+   // info API
    "status":               rpc.NewRPCFunc(StatusResult, ""),
    "net_info":             rpc.NewRPCFunc(NetInfoResult, ""),
-   "dial_seeds":           rpc.NewRPCFunc(DialSeedsResult, "seeds"),
    "blockchain":           rpc.NewRPCFunc(BlockchainInfoResult, "minHeight,maxHeight"),
    "genesis":              rpc.NewRPCFunc(GenesisResult, ""),
    "block":                rpc.NewRPCFunc(BlockResult, "height"),
    "validators":           rpc.NewRPCFunc(ValidatorsResult, ""),
    "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusStateResult, ""),
-   "broadcast_tx_commit":  rpc.NewRPCFunc(BroadcastTxCommitResult, "tx"),
-   "broadcast_tx_sync":    rpc.NewRPCFunc(BroadcastTxSyncResult, "tx"),
-   "broadcast_tx_async":   rpc.NewRPCFunc(BroadcastTxAsyncResult, "tx"),
    "unconfirmed_txs":      rpc.NewRPCFunc(UnconfirmedTxsResult, ""),
    "num_unconfirmed_txs":  rpc.NewRPCFunc(NumUnconfirmedTxsResult, ""),

-   "tmsp_query": rpc.NewRPCFunc(TMSPQueryResult, "query"),
-   "tmsp_info":  rpc.NewRPCFunc(TMSPInfoResult, ""),
+   // broadcast API
+   "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommitResult, "tx"),
+   "broadcast_tx_sync":   rpc.NewRPCFunc(BroadcastTxSyncResult, "tx"),
+   "broadcast_tx_async":  rpc.NewRPCFunc(BroadcastTxAsyncResult, "tx"),

-   "unsafe_flush_mempool": rpc.NewRPCFunc(UnsafeFlushMempool, ""),
-   "unsafe_set_config":    rpc.NewRPCFunc(UnsafeSetConfigResult, "type,key,value"),
+   // abci API
+   "abci_query": rpc.NewRPCFunc(ABCIQueryResult, "query"),
+   "abci_info":  rpc.NewRPCFunc(ABCIInfoResult, ""),

+   // control API
+   "dial_seeds":           rpc.NewRPCFunc(UnsafeDialSeedsResult, "seeds"),
+   "unsafe_flush_mempool": rpc.NewRPCFunc(UnsafeFlushMempool, ""),
+   "unsafe_set_config":    rpc.NewRPCFunc(UnsafeSetConfigResult, "type,key,value"),

+   // profiler API
    "unsafe_start_cpu_profiler": rpc.NewRPCFunc(UnsafeStartCPUProfilerResult, "filename"),
    "unsafe_stop_cpu_profiler":  rpc.NewRPCFunc(UnsafeStopCPUProfilerResult, ""),
    "unsafe_write_heap_profile": rpc.NewRPCFunc(UnsafeWriteHeapProfileResult, "filename"),
@@ -67,8 +75,8 @@ func NetInfoResult() (ctypes.TMResult, error) {
    }
}

-func DialSeedsResult(seeds []string) (ctypes.TMResult, error) {
-   if r, err := DialSeeds(seeds); err != nil {
+func UnsafeDialSeedsResult(seeds []string) (ctypes.TMResult, error) {
+   if r, err := UnsafeDialSeeds(seeds); err != nil {
        return nil, err
    } else {
        return r, nil
@@ -155,16 +163,16 @@ func BroadcastTxAsyncResult(tx []byte) (ctypes.TMResult, error) {
    }
}

-func TMSPQueryResult(query []byte) (ctypes.TMResult, error) {
-   if r, err := TMSPQuery(query); err != nil {
+func ABCIQueryResult(query []byte) (ctypes.TMResult, error) {
+   if r, err := ABCIQuery(query); err != nil {
        return nil, err
    } else {
        return r, nil
    }
}

-func TMSPInfoResult() (ctypes.TMResult, error) {
-   if r, err := TMSPInfo(); err != nil {
+func ABCIInfoResult() (ctypes.TMResult, error) {
+   if r, err := ABCIInfo(); err != nil {
        return nil, err
    } else {
        return r, nil
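
The reorganized Routes table is still just a name-to-handler map, so renaming an endpoint (tmsp_query to abci_query) only touches the key and its target function. A toy illustration with plain string handlers:

package main

import "fmt"

var routes = map[string]func() string{
    "abci_query": func() string { return "handled by ABCIQueryResult" },
    "abci_info":  func() string { return "handled by ABCIInfoResult" },
    "dial_seeds": func() string { return "handled by UnsafeDialSeedsResult" },
}

func main() {
    for name, h := range routes {
        fmt.Println(name, "->", h())
    }
}
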
@@ -22,7 +22,7 @@ func Status() (*ctypes.ResultStatus, error) {

    return &ctypes.ResultStatus{
        NodeInfo:          p2pSwitch.NodeInfo(),
-       PubKey:            privValidator.PubKey,
+       PubKey:            pubKey,
        LatestBlockHash:   latestBlockHash,
        LatestAppHash:     latestAppHash,
        LatestBlockHeight: latestHeight,

@@ -1,17 +0,0 @@
-package core
-
-import (
-   ctypes "github.com/tendermint/tendermint/rpc/core/types"
-)
-
-//-----------------------------------------------------------------------------
-
-func TMSPQuery(query []byte) (*ctypes.ResultTMSPQuery, error) {
-   res := proxyAppQuery.QuerySync(query)
-   return &ctypes.ResultTMSPQuery{res}, nil
-}
-
-func TMSPInfo() (*ctypes.ResultTMSPInfo, error) {
-   res := proxyAppQuery.InfoSync()
-   return &ctypes.ResultTMSPInfo{res}, nil
-}
@@ -6,7 +6,7 @@ import (
    "github.com/tendermint/go-rpc/types"
    "github.com/tendermint/go-wire"
    "github.com/tendermint/tendermint/types"
-   tmsp "github.com/tendermint/tmsp/types"
+   abci "github.com/tendermint/abci/types"
)

type ResultBlockchainInfo struct {
@@ -58,22 +58,30 @@ type ResultDumpConsensusState struct {
}

type ResultBroadcastTx struct {
-   Code tmsp.CodeType `json:"code"`
+   Code abci.CodeType `json:"code"`
    Data []byte        `json:"data"`
    Log  string        `json:"log"`
}

+type ResultBroadcastTxCommit struct {
+   CheckTx   *abci.ResponseCheckTx   `json:"check_tx"`
+   DeliverTx *abci.ResponseDeliverTx `json:"deliver_tx"`
+}
+
type ResultUnconfirmedTxs struct {
    N   int        `json:"n_txs"`
    Txs []types.Tx `json:"txs"`
}

-type ResultTMSPInfo struct {
-   Result tmsp.Result `json:"result"`
+type ResultABCIInfo struct {
+   Data             string `json:"data"`
+   Version          string `json:"version"`
+   LastBlockHeight  uint64 `json:"last_block_height"`
+   LastBlockAppHash []byte `json:"last_block_app_hash"`
}

-type ResultTMSPQuery struct {
-   Result tmsp.Result `json:"result"`
+type ResultABCIQuery struct {
+   Result abci.Result `json:"result"`
}

type ResultUnsafeFlushMempool struct{}
@@ -112,12 +120,13 @@ const (
    ResultTypeDumpConsensusState = byte(0x41)

    // 0x6 bytes are for txs / the application
-   ResultTypeBroadcastTx    = byte(0x60)
-   ResultTypeUnconfirmedTxs = byte(0x61)
+   ResultTypeBroadcastTx       = byte(0x60)
+   ResultTypeUnconfirmedTxs    = byte(0x61)
+   ResultTypeBroadcastTxCommit = byte(0x62)

    // 0x7 bytes are for querying the application
-   ResultTypeTMSPQuery = byte(0x70)
-   ResultTypeTMSPInfo  = byte(0x71)
+   ResultTypeABCIQuery = byte(0x70)
+   ResultTypeABCIInfo  = byte(0x71)

    // 0x8 bytes are for events
    ResultTypeSubscribe = byte(0x80)
@@ -148,6 +157,7 @@ var _ = wire.RegisterInterface(
    wire.ConcreteType{&ResultValidators{}, ResultTypeValidators},
    wire.ConcreteType{&ResultDumpConsensusState{}, ResultTypeDumpConsensusState},
    wire.ConcreteType{&ResultBroadcastTx{}, ResultTypeBroadcastTx},
+   wire.ConcreteType{&ResultBroadcastTxCommit{}, ResultTypeBroadcastTxCommit},
    wire.ConcreteType{&ResultUnconfirmedTxs{}, ResultTypeUnconfirmedTxs},
    wire.ConcreteType{&ResultSubscribe{}, ResultTypeSubscribe},
    wire.ConcreteType{&ResultUnsubscribe{}, ResultTypeUnsubscribe},
@@ -157,6 +167,6 @@ var _ = wire.RegisterInterface(
    wire.ConcreteType{&ResultUnsafeProfile{}, ResultTypeUnsafeStopCPUProfiler},
    wire.ConcreteType{&ResultUnsafeProfile{}, ResultTypeUnsafeWriteHeapProfile},
    wire.ConcreteType{&ResultUnsafeFlushMempool{}, ResultTypeUnsafeFlushMempool},
-   wire.ConcreteType{&ResultTMSPQuery{}, ResultTypeTMSPQuery},
-   wire.ConcreteType{&ResultTMSPInfo{}, ResultTypeTMSPInfo},
+   wire.ConcreteType{&ResultABCIQuery{}, ResultTypeABCIQuery},
+   wire.ConcreteType{&ResultABCIInfo{}, ResultTypeABCIInfo},
)

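
The result-type bytes group results by their high nibble (0x4x consensus, 0x6x txs, 0x7x application queries, 0x8x events), which is why ResultTypeBroadcastTxCommit slots in at 0x62. A small check of that grouping:

package main

import "fmt"

const (
    ResultTypeBroadcastTx       = byte(0x60)
    ResultTypeUnconfirmedTxs    = byte(0x61)
    ResultTypeBroadcastTxCommit = byte(0x62)
    ResultTypeABCIQuery         = byte(0x70)
    ResultTypeABCIInfo          = byte(0x71)
)

func main() {
    // High nibble identifies the category.
    fmt.Printf("tx group: 0x%X, query group: 0x%X\n",
        ResultTypeBroadcastTxCommit>>4, ResultTypeABCIInfo>>4)
}
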
18 rpc/grpc/api.go (new file)
@@ -0,0 +1,18 @@
+package core_grpc
+
+import (
+   core "github.com/tendermint/tendermint/rpc/core"
+
+   context "golang.org/x/net/context"
+)
+
+type broadcastAPI struct {
+}
+
+func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) {
+   res, err := core.BroadcastTxCommit(req.Tx)
+   if err != nil {
+       return nil, err
+   }
+   return &ResponseBroadcastTx{res.CheckTx, res.DeliverTx}, nil
+}
44 rpc/grpc/client_server.go (new file)
@@ -0,0 +1,44 @@
+package core_grpc
+
+import (
+   "fmt"
+   "net"
+   "strings"
+   "time"
+
+   "google.golang.org/grpc"
+
+   . "github.com/tendermint/go-common"
+)
+
+// Start the grpcServer in a go routine
+func StartGRPCServer(protoAddr string) (net.Listener, error) {
+   parts := strings.SplitN(protoAddr, "://", 2)
+   if len(parts) != 2 {
+       return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?) : %s", protoAddr)
+   }
+   proto, addr := parts[0], parts[1]
+   ln, err := net.Listen(proto, addr)
+   if err != nil {
+       return nil, err
+   }
+
+   grpcServer := grpc.NewServer()
+   RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
+   go grpcServer.Serve(ln)
+
+   return ln, nil
+}
+
+// Start the client by dialing the server
+func StartGRPCClient(protoAddr string) BroadcastAPIClient {
+   conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+   if err != nil {
+       panic(err)
+   }
+   return NewBroadcastAPIClient(conn)
+}
+
+func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+   return Connect(addr)
+}
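
StartGRPCServer expects a proto://addr listen string and fails fast otherwise. A self-contained sketch of just the address parsing (splitProtoAddr is hypothetical):

package main

import (
    "fmt"
    "strings"
)

// splitProtoAddr mirrors the address handling above:
// "tcp://host:port" -> ("tcp", "host:port").
func splitProtoAddr(protoAddr string) (proto, addr string, err error) {
    parts := strings.SplitN(protoAddr, "://", 2)
    if len(parts) != 2 {
        return "", "", fmt.Errorf("invalid listen address %q (did you forget a tcp:// prefix?)", protoAddr)
    }
    return parts[0], parts[1], nil
}

func main() {
    fmt.Println(splitProtoAddr("tcp://127.0.0.1:46657"))
    fmt.Println(splitProtoAddr("127.0.0.1:46657"))
}
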
3 rpc/grpc/compile.sh (new file)
@@ -0,0 +1,3 @@
+#! /bin/bash
+
+protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . types.proto
174 rpc/grpc/types.pb.go (new file)
@@ -0,0 +1,174 @@
+// Code generated by protoc-gen-go.
+// source: types.proto
+// DO NOT EDIT!
+
+/*
+Package core_grpc is a generated protocol buffer package.
+
+It is generated from these files:
+   types.proto
+
+It has these top-level messages:
+   RequestBroadcastTx
+   ResponseBroadcastTx
+*/
+package core_grpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import types "github.com/tendermint/abci/types"
+
+import (
+   context "golang.org/x/net/context"
+   grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type RequestBroadcastTx struct {
+   Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"`
+}
+
+func (m *RequestBroadcastTx) Reset()                    { *m = RequestBroadcastTx{} }
+func (m *RequestBroadcastTx) String() string            { return proto.CompactTextString(m) }
+func (*RequestBroadcastTx) ProtoMessage()               {}
+func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *RequestBroadcastTx) GetTx() []byte {
+   if m != nil {
+       return m.Tx
+   }
+   return nil
+}
+
+type ResponseBroadcastTx struct {
+   CheckTx   *types.ResponseCheckTx   `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"`
+   DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"`
+}
+
+func (m *ResponseBroadcastTx) Reset()                    { *m = ResponseBroadcastTx{} }
+func (m *ResponseBroadcastTx) String() string            { return proto.CompactTextString(m) }
+func (*ResponseBroadcastTx) ProtoMessage()               {}
+func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx {
+   if m != nil {
+       return m.CheckTx
+   }
+   return nil
+}
+
+func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx {
+   if m != nil {
+       return m.DeliverTx
+   }
+   return nil
+}
+
+func init() {
+   proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx")
+   proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for BroadcastAPI service
+
+type BroadcastAPIClient interface {
+   BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error)
+}
+
+type broadcastAPIClient struct {
+   cc *grpc.ClientConn
+}
+
+func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient {
+   return &broadcastAPIClient{cc}
+}
+
+func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) {
+   out := new(ResponseBroadcastTx)
+   err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...)
+   if err != nil {
+       return nil, err
+   }
+   return out, nil
+}
+
+// Server API for BroadcastAPI service
+
+type BroadcastAPIServer interface {
+   BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error)
+}
+
+func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) {
+   s.RegisterService(&_BroadcastAPI_serviceDesc, srv)
+}
+
+func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+   in := new(RequestBroadcastTx)
+   if err := dec(in); err != nil {
+       return nil, err
+   }
+   if interceptor == nil {
+       return srv.(BroadcastAPIServer).BroadcastTx(ctx, in)
+   }
+   info := &grpc.UnaryServerInfo{
+       Server:     srv,
+       FullMethod: "/core_grpc.BroadcastAPI/BroadcastTx",
+   }
+   handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+       return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx))
+   }
+   return interceptor(ctx, in, info, handler)
+}
+
+var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{
+   ServiceName: "core_grpc.BroadcastAPI",
+   HandlerType: (*BroadcastAPIServer)(nil),
+   Methods: []grpc.MethodDesc{
+       {
+           MethodName: "BroadcastTx",
+           Handler:    _BroadcastAPI_BroadcastTx_Handler,
+       },
+   },
+   Streams:  []grpc.StreamDesc{},
+   Metadata: "types.proto",
+}
+
+func init() { proto.RegisterFile("types.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+   // 226 bytes of a gzipped FileDescriptorProto
+   0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48,
+   0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f,
+   0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f,
+   0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xc9, 0x2d, 0x2e, 0xd0, 0x07,
+   0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xa4, 0xc2, 0x25, 0x14, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xe2,
+   0x54, 0x94, 0x9f, 0x98, 0x92, 0x9c, 0x58, 0x5c, 0x12, 0x52, 0x21, 0xc4, 0xc7, 0xc5, 0x54, 0x52,
+   0x21, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x13, 0xc4, 0x54, 0x52, 0xa1, 0x54, 0xc7, 0x25, 0x1c, 0x94,
+   0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x8a, 0xac, 0xcc, 0x90, 0x8b, 0x23, 0x39, 0x23, 0x35, 0x39,
+   0x3b, 0x1e, 0xaa, 0x98, 0xdb, 0x48, 0x4c, 0x0f, 0x62, 0x38, 0x4c, 0xb5, 0x33, 0x48, 0x3a, 0xa4,
+   0x22, 0x88, 0x3d, 0x19, 0xc2, 0x10, 0x32, 0xe1, 0xe2, 0x4c, 0x2c, 0x28, 0x48, 0xcd, 0x4b, 0x01,
|
||||
0xe9, 0x61, 0x02, 0xeb, 0x11, 0x47, 0xd3, 0xe3, 0x08, 0x96, 0x0f, 0xa9, 0x08, 0xe2, 0x48, 0x84,
|
||||
0xb2, 0x8c, 0x62, 0xb8, 0x78, 0xe0, 0xf6, 0x3a, 0x06, 0x78, 0x0a, 0xf9, 0x70, 0x71, 0x23, 0xbb,
|
||||
0x43, 0x56, 0x0f, 0xee, 0x7d, 0x3d, 0x4c, 0xdf, 0x48, 0xc9, 0xa1, 0x48, 0x63, 0x78, 0x23, 0x89,
|
||||
0x0d, 0x1c, 0x14, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0x73, 0x87, 0xb0, 0x52, 0x01,
|
||||
0x00, 0x00,
|
||||
}
|

rpc/grpc/types.proto | 29 (new file)
@@ -0,0 +1,29 @@
syntax = "proto3";
package core_grpc;

import "github.com/tendermint/abci/types/types.proto";

//----------------------------------------
// Message types

//----------------------------------------
// Request types

message RequestBroadcastTx {
	bytes tx = 1;
}

//----------------------------------------
// Response types

message ResponseBroadcastTx {
	types.ResponseCheckTx check_tx = 1;
	types.ResponseDeliverTx deliver_tx = 2;
}

//----------------------------------------
// Service Definition

service BroadcastAPI {
	rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx);
}
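
Taken together, the new proto files expose Tendermint's BroadcastTx over gRPC. A minimal client sketch against the generated stub (the address is the test-harness default from test/app/grpc_client.go below, and StartGRPCClient is the dial helper this changeset uses in rpc/test):

package main

import (
	"fmt"

	"golang.org/x/net/context"

	core_grpc "github.com/tendermint/tendermint/rpc/grpc"
)

func main() {
	// Dial the node's gRPC listen address ("grpc_laddr" in the config).
	client := core_grpc.StartGRPCClient("tcp://localhost:36656")
	res, err := client.BroadcastTx(context.Background(),
		&core_grpc.RequestBroadcastTx{Tx: []byte("key=value")})
	if err != nil {
		panic(err)
	}
	// Non-zero codes mean the tx failed CheckTx or DeliverTx.
	fmt.Println(res.CheckTx.Code, res.DeliverTx.Code)
}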

@@ -5,14 +5,15 @@ import (
	crand "crypto/rand"
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

	. "github.com/tendermint/go-common"
	"github.com/tendermint/go-wire"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	"github.com/tendermint/tendermint/types"
	tmsp "github.com/tendermint/tmsp/types"
	"github.com/tendermint/abci/example/dummy"
	abci "github.com/tendermint/abci/types"
)

//--------------------------------------------------------------------------------
@@ -91,7 +92,7 @@ func TestJSONBroadcastTxSync(t *testing.T) {
func testBroadcastTxSync(t *testing.T, resI interface{}, tx []byte) {
	tmRes := resI.(*ctypes.TMResult)
	res := (*tmRes).(*ctypes.ResultBroadcastTx)
	if res.Code != tmsp.CodeType_OK {
	if res.Code != abci.CodeType_OK {
		panic(Fmt("BroadcastTxSync got non-zero exit code: %v. %X; %s", res.Code, res.Data, res.Log))
	}
	mem := node.MempoolReactor().Mempool
@@ -129,36 +130,41 @@ func sendTx() ([]byte, []byte) {
	return k, v
}

func TestURITMSPQuery(t *testing.T) {
func TestURIABCIQuery(t *testing.T) {
	k, v := sendTx()
	time.Sleep(time.Second)
	tmResult := new(ctypes.TMResult)
	_, err := clientURI.Call("tmsp_query", map[string]interface{}{"query": Fmt("%X", k)}, tmResult)
	_, err := clientURI.Call("abci_query", map[string]interface{}{"query": k}, tmResult)
	if err != nil {
		panic(err)
	}
	testTMSPQuery(t, tmResult, v)
	testABCIQuery(t, tmResult, v)
}

func TestJSONTMSPQuery(t *testing.T) {
func TestJSONABCIQuery(t *testing.T) {
	k, v := sendTx()
	tmResult := new(ctypes.TMResult)
	_, err := clientJSON.Call("tmsp_query", []interface{}{Fmt("%X", k)}, tmResult)
	_, err := clientJSON.Call("abci_query", []interface{}{k}, tmResult)
	if err != nil {
		panic(err)
	}
	testTMSPQuery(t, tmResult, v)
	testABCIQuery(t, tmResult, v)
}

func testTMSPQuery(t *testing.T, statusI interface{}, value []byte) {
func testABCIQuery(t *testing.T, statusI interface{}, value []byte) {
	tmRes := statusI.(*ctypes.TMResult)
	query := (*tmRes).(*ctypes.ResultTMSPQuery)
	query := (*tmRes).(*ctypes.ResultABCIQuery)
	if query.Result.IsErr() {
		panic(Fmt("Query returned an err: %v", query))
	}

	qResult := new(dummy.QueryResult)
	if err := wire.ReadJSONBytes(query.Result.Data, qResult); err != nil {
		t.Fatal(err)
	}
	// XXX: specific to value returned by the dummy
	if !strings.Contains(string(query.Result.Data), "exists=true") {
		panic(Fmt("Query error. Expected to find 'exists=true'. Got: %s", query.Result.Data))
	if qResult.Exists != true {
		panic(Fmt("Query error. Expected to find 'exists=true'. Got: %v", qResult))
	}
}

@@ -187,9 +193,14 @@ func TestJSONBroadcastTxCommit(t *testing.T) {

func testBroadcastTxCommit(t *testing.T, resI interface{}, tx []byte) {
	tmRes := resI.(*ctypes.TMResult)
	res := (*tmRes).(*ctypes.ResultBroadcastTx)
	if res.Code != tmsp.CodeType_OK {
		panic(Fmt("BroadcastTxCommit got non-zero exit code: %v. %X; %s", res.Code, res.Data, res.Log))
	res := (*tmRes).(*ctypes.ResultBroadcastTxCommit)
	checkTx := res.CheckTx
	if checkTx.Code != abci.CodeType_OK {
		panic(Fmt("BroadcastTxCommit got non-zero exit code from CheckTx: %v. %X; %s", checkTx.Code, checkTx.Data, checkTx.Log))
	}
	deliverTx := res.DeliverTx
	if deliverTx.Code != abci.CodeType_OK {
		panic(Fmt("BroadcastTxCommit got non-zero exit code from DeliverTx: %v. %X; %s", deliverTx.Code, deliverTx.Data, deliverTx.Log))
	}
	mem := node.MempoolReactor().Mempool
	if mem.Size() != 0 {
@@ -257,6 +268,40 @@ func TestWSBlockchainGrowth(t *testing.T) {
	}
}

func TestWSTxEvent(t *testing.T) {
	wsc := newWSClient(t)
	tx := randBytes()

	// listen for the tx I am about to submit
	eid := types.EventStringTx(types.Tx(tx))
	subscribe(t, wsc, eid)
	defer func() {
		unsubscribe(t, wsc, eid)
		wsc.Stop()
	}()

	// send a tx
	tmResult := new(ctypes.TMResult)
	_, err := clientJSON.Call("broadcast_tx_sync", []interface{}{tx}, tmResult)
	if err != nil {
		t.Fatal("Error submitting event")
	}

	waitForEvent(t, wsc, eid, true, func() {}, func(eid string, b interface{}) error {
		evt, ok := b.(types.EventDataTx)
		if !ok {
			t.Fatal("Got wrong event type", b)
		}
		if bytes.Compare([]byte(evt.Tx), tx) != 0 {
			t.Error("Event returned different tx")
		}
		if evt.Code != abci.CodeType_OK {
			t.Error("Event returned tx error code", evt.Code)
		}
		return nil
	})
}

/* TODO: this with dummy app..
func TestWSDoubleFire(t *testing.T) {
	if testing.Short() {

rpc/test/grpc_test.go | 24 (new file)
@@ -0,0 +1,24 @@
package rpctest

import (
	"testing"

	"golang.org/x/net/context"

	"github.com/tendermint/tendermint/rpc/grpc"
)

//-------------------------------------------

func TestBroadcastTx(t *testing.T) {
	res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")})
	if err != nil {
		t.Fatal(err)
	}
	if res.CheckTx.Code != 0 {
		t.Fatalf("Non-zero check tx code: %d", res.CheckTx.Code)
	}
	if res.DeliverTx.Code != 0 {
		t.Fatalf("Non-zero deliver tx code: %d", res.DeliverTx.Code)
	}
}

@@ -13,6 +13,7 @@ import (
	"github.com/tendermint/tendermint/config/tendermint_test"
	nm "github.com/tendermint/tendermint/node"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	"github.com/tendermint/tendermint/rpc/grpc"
)

// global variables for use across all tests
@@ -24,8 +25,10 @@ var (
	requestAddr       string
	websocketAddr     string
	websocketEndpoint string
	grpcAddr          string
	clientURI         *client.ClientURI
	clientJSON        *client.ClientJSONRPC
	clientGRPC        core_grpc.BroadcastAPIClient
)

// initialize config and create new node
@@ -33,12 +36,14 @@ func init() {
	config = tendermint_test.ResetConfig("rpc_test_client_test")
	chainID = config.GetString("chain_id")
	rpcAddr = config.GetString("rpc_laddr")
	grpcAddr = config.GetString("grpc_laddr")
	requestAddr = rpcAddr
	websocketAddr = rpcAddr
	websocketEndpoint = "/websocket"

	clientURI = client.NewClientURI(requestAddr)
	clientJSON = client.NewClientJSONRPC(requestAddr)
	clientGRPC = core_grpc.StartGRPCClient(grpcAddr)

	// TODO: change consensus/state.go timeouts to be shorter
@@ -59,6 +64,8 @@ func newNode(ready chan struct{}) {

	// Run the RPC server.
	node.StartRPC()
	time.Sleep(time.Second)

	ready <- struct{}{}

	// Sleep forever

scripts/glide/checkout.sh | 27 (new file)
@@ -0,0 +1,27 @@
#! /bin/bash
set -u

function parseGlide() {
	cat $1 | grep -A1 $2 | grep -v $2 | awk '{print $2}'
}

# fetch and checkout vendored dep

glide=$1
lib=$2

echo "----------------------------------"
echo "Getting $lib ..."
go get -t github.com/tendermint/$lib/...

VENDORED=$(parseGlide $glide $lib)
cd $GOPATH/src/github.com/tendermint/$lib
MASTER=$(git rev-parse origin/master)

if [[ "$VENDORED" != "$MASTER" ]]; then
	echo "... VENDORED != MASTER ($VENDORED != $MASTER)"
	echo "... Checking out commit $VENDORED"
	git checkout $VENDORED &> /dev/null
fi
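
Usage sketch (arguments as the script reads them: a glide lock file, then a repo name under github.com/tendermint; go-wire is only an example):

bash scripts/glide/checkout.sh glide.lock go-wire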

@@ -1,8 +1,13 @@
#! /bin/bash

set +u
if [[ "$GLIDE" == "" ]]; then
	GLIDE=$GOPATH/src/github.com/tendermint/tendermint/glide.lock
fi
set -u

set -euo pipefail

LIB=$1

GLIDE=$GOPATH/src/github.com/tendermint/tendermint/glide.lock

cat $GLIDE | grep -A1 $LIB | grep -v $LIB | awk '{print $2}'

@@ -2,7 +2,9 @@

# for every github.com/tendermint dependency, warn if it's not synced with origin/master

GLIDE=$GOPATH/src/github.com/tendermint/tendermint/glide.lock
if [[ "$GLIDE" == "" ]]; then
	GLIDE=$GOPATH/src/github.com/tendermint/tendermint/glide.lock
fi

# make list of libs
LIBS=($(grep "github.com/tendermint" $GLIDE | awk '{print $3}'))
@@ -31,6 +33,11 @@ for lib in "${LIBS[@]}"; do
		echo "Vendored: $VENDORED"
		echo "Master:   $MASTER"
	fi
	elif [[ "$VENDORED" != "$HEAD" ]]; then
		echo ""
		echo "Vendored version of $lib matches origin/master but differs from HEAD"
		echo "Vendored: $VENDORED"
		echo "Head:     $HEAD"
	fi
done

@@ -6,9 +6,14 @@ IFS=$'\n\t'

LIB=$1

GLIDE=$GOPATH/src/github.com/tendermint/tendermint/glide.lock
TMCORE=$GOPATH/src/github.com/tendermint/tendermint
set +u
if [[ "$GLIDE" == "" ]]; then
	GLIDE=$TMCORE/glide.lock
fi
set -u

OLD_COMMIT=`bash scripts/glide/parse.sh $LIB`
OLD_COMMIT=`bash $TMCORE/scripts/glide/parse.sh $LIB`

PWD=`pwd`
cd $GOPATH/src/github.com/tendermint/$LIB
@@ -16,4 +21,12 @@ cd $GOPATH/src/github.com/tendermint/$LIB
NEW_COMMIT=$(git rev-parse HEAD)

cd $PWD
sed -i "s/$OLD_COMMIT/$NEW_COMMIT/g" $GLIDE

uname -a | grep Linux > /dev/null
if [[ "$?" == 0 ]]; then
	# linux
	sed -i "s/$OLD_COMMIT/$NEW_COMMIT/g" $GLIDE
else
	# mac
	sed -i "" "s/$OLD_COMMIT/$NEW_COMMIT/g" $GLIDE
fi

scripts/install_abci_apps.sh | 13 (new file)
@@ -0,0 +1,13 @@
#! /bin/bash

go get github.com/tendermint/abci/...

# get the abci commit used by tendermint
COMMIT=`bash scripts/glide/parse.sh abci`

echo "Checking out vendored commit for abci: $COMMIT"

cd $GOPATH/src/github.com/tendermint/abci
git checkout $COMMIT
glide install
go install ./cmd/...

@@ -1,12 +0,0 @@
#! /bin/bash

go get github.com/tendermint/tmsp/...

# get the tmsp commit used by tendermint
COMMIT=`bash scripts/glide/parse.sh $(pwd)/glide.lock tmsp`

cd $GOPATH/src/github.com/tendermint/tmsp
git checkout $COMMIT
go install ./cmd/...

scripts/txs/random.sh | 19 (new file)
@@ -0,0 +1,19 @@
#! /bin/bash
set -u

function toHex() {
	echo -n $1 | hexdump -ve '1/1 "%.2X"'
}

N=$1
PORT=$2

for i in `seq 1 $N`; do
	# store key value pair
	KEY="abcd$i"
	VALUE="dcba$i"
	echo "$KEY:$VALUE"
	curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=\"$(toHex $KEY=$VALUE)\"
done
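
For example, to fire 100 random key/value txs at a node whose RPC server listens on 46657 (the port the other test scripts assume):

bash scripts/txs/random.sh 100 46657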

state/errors.go | 55 (new file)
@@ -0,0 +1,55 @@
package state

import (
	. "github.com/tendermint/go-common"
)

type (
	ErrInvalidBlock error
	ErrProxyAppConn error

	ErrUnknownBlock struct {
		height int
	}

	ErrBlockHashMismatch struct {
		coreHash []byte
		appHash  []byte
		height   int
	}

	ErrAppBlockHeightTooHigh struct {
		coreHeight int
		appHeight  int
	}

	ErrLastStateMismatch struct {
		height int
		core   []byte
		app    []byte
	}

	ErrStateMismatch struct {
		got      *State
		expected *State
	}
)

func (e ErrUnknownBlock) Error() string {
	return Fmt("Could not find block #%d", e.height)
}

func (e ErrBlockHashMismatch) Error() string {
	return Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.appHash, e.coreHash, e.height)
}

func (e ErrAppBlockHeightTooHigh) Error() string {
	return Fmt("App block height (%d) is higher than core (%d)", e.appHeight, e.coreHeight)
}
func (e ErrLastStateMismatch) Error() string {
	return Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.height, e.core, e.app)
}

func (e ErrStateMismatch) Error() string {
	return Fmt("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.got, e.expected)
}

@@ -1,111 +1,205 @@
package state

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/ebuchman/fail-test"

	. "github.com/tendermint/go-common"
	"github.com/tendermint/go-events"
	cfg "github.com/tendermint/go-config"
	"github.com/tendermint/go-crypto"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
	tmsp "github.com/tendermint/tmsp/types"
	abci "github.com/tendermint/abci/types"
)

// Validate block
func (s *State) ValidateBlock(block *types.Block) error {
	return s.validateBlock(block)
}
//--------------------------------------------------
// Execute the block

// Execute the block to mutate State.
// Validates block and then executes Data.Txs in the block.
func (s *State) ExecBlock(eventCache events.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block, blockPartsHeader types.PartSetHeader) error {
func (s *State) ExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block, blockPartsHeader types.PartSetHeader) error {

	// Validate the block.
	err := s.validateBlock(block)
	if err != nil {
		return err
	if err := s.validateBlock(block); err != nil {
		return ErrInvalidBlock(err)
	}

	// Update the validator set
	// compute bitarray of validators that signed
	signed := commitBitArrayFromBlock(block)
	_ = signed // TODO send on begin block

	// copy the valset
	valSet := s.Validators.Copy()
	// Update valSet with signatures from block.
	updateValidatorsWithBlock(s.LastValidators, valSet, block)
	// TODO: Update the validator set (e.g. block.Data.ValidatorUpdates?)
	nextValSet := valSet.Copy()

	// Execute the block txs
	err = s.execBlockOnProxyApp(eventCache, proxyAppConn, block)
	changedValidators, err := execBlockOnProxyApp(eventCache, proxyAppConn, block)
	if err != nil {
		// There was some error in proxyApp
		// TODO Report error and wait for proxyApp to be available.
		return err
		return ErrProxyAppConn(err)
	}

	// update the validator set
	err = updateValidators(nextValSet, changedValidators)
	if err != nil {
		log.Warn("Error changing validator set", "error", err)
		// TODO: err or carry on?
	}

	// All good!
	// Update validator accums and set state variables
	nextValSet.IncrementAccum(1)
	s.LastBlockHeight = block.Height
	s.LastBlockHash = block.Hash()
	s.LastBlockParts = blockPartsHeader
	s.LastBlockTime = block.Time
	s.Validators = nextValSet
	s.LastValidators = valSet
	s.SetBlockAndValidators(block.Header, blockPartsHeader, valSet, nextValSet)

	// save state with updated height/blockhash/validators
	// but stale apphash, in case we fail between Commit and Save
	s.SaveIntermediate()

	fail.Fail() // XXX

	return nil
}

// Executes block's transactions on proxyAppConn.
// Returns a list of updates to the validator set
// TODO: Generate a bitmap or otherwise store tx validity in state.
func (s *State) execBlockOnProxyApp(eventCache events.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) error {
func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) ([]*abci.Validator, error) {

	var validTxs, invalidTxs = 0, 0

	// Execute transactions and get hash
	proxyCb := func(req *tmsp.Request, res *tmsp.Response) {
	proxyCb := func(req *abci.Request, res *abci.Response) {
		switch r := res.Value.(type) {
		case *tmsp.Response_AppendTx:
		case *abci.Response_DeliverTx:
			// TODO: make use of res.Log
			// TODO: make use of this info
			// Blocks may include invalid txs.
			// reqAppendTx := req.(tmsp.RequestAppendTx)
			if r.AppendTx.Code == tmsp.CodeType_OK {
			// reqDeliverTx := req.(abci.RequestDeliverTx)
			txError := ""
			apTx := r.DeliverTx
			if apTx.Code == abci.CodeType_OK {
				validTxs += 1
			} else {
				log.Debug("Invalid tx", "code", r.AppendTx.Code, "log", r.AppendTx.Log)
				log.Debug("Invalid tx", "code", r.DeliverTx.Code, "log", r.DeliverTx.Log)
				invalidTxs += 1
				txError = apTx.Code.String()
			}
			// NOTE: if we count we can access the tx from the block instead of
			// pulling it from the req
			eventCache.FireEvent(types.EventStringTx(req.GetAppendTx().Tx), res)
			event := types.EventDataTx{
				Tx:    req.GetDeliverTx().Tx,
				Data:  apTx.Data,
				Code:  apTx.Code,
				Log:   apTx.Log,
				Error: txError,
			}
			types.FireEventTx(eventCache, event)
		}
	}
	proxyAppConn.SetResponseCallback(proxyCb)

	// TODO: BeginBlock
	// Begin block
	err := proxyAppConn.BeginBlockSync(block.Hash(), types.TM2PB.Header(block.Header))
	if err != nil {
		log.Warn("Error in proxyAppConn.BeginBlock", "error", err)
		return nil, err
	}

	fail.Fail() // XXX

	// Run txs of block
	for _, tx := range block.Txs {
		proxyAppConn.AppendTxAsync(tx)
		fail.FailRand(len(block.Txs)) // XXX
		proxyAppConn.DeliverTxAsync(tx)
		if err := proxyAppConn.Error(); err != nil {
			return err
			return nil, err
		}
	}

	fail.Fail() // XXX

	// End block
	changedValidators, err := proxyAppConn.EndBlockSync(uint64(block.Height))
	respEndBlock, err := proxyAppConn.EndBlockSync(uint64(block.Height))
	if err != nil {
		log.Warn("Error in proxyAppConn.EndBlock", "error", err)
		return err
		return nil, err
	}
	// TODO: Do something with changedValidators
	log.Info("TODO: Do something with changedValidators", changedValidators)

	log.Info(Fmt("ExecBlock got %v valid txs and %v invalid txs", validTxs, invalidTxs))
	fail.Fail() // XXX

	log.Info("Executed block", "height", block.Height, "valid txs", validTxs, "invalid txs", invalidTxs)
	if len(respEndBlock.Diffs) > 0 {
		log.Info("Update to validator set", "updates", abci.ValidatorsString(respEndBlock.Diffs))
	}
	return respEndBlock.Diffs, nil
}

func updateValidators(validators *types.ValidatorSet, changedValidators []*abci.Validator) error {
	// TODO: prevent change of 1/3+ at once

	for _, v := range changedValidators {
		pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
		if err != nil {
			return err
		}

		address := pubkey.Address()
		power := int64(v.Power)
		// mind the overflow from uint64
		if power < 0 {
			return errors.New(Fmt("Power (%d) overflows int64", v.Power))
		}

		_, val := validators.GetByAddress(address)
		if val == nil {
			// add val
			added := validators.Add(types.NewValidator(pubkey, power))
			if !added {
				return errors.New(Fmt("Failed to add new validator %X with voting power %d", address, power))
			}
		} else if v.Power == 0 {
			// remove val
			_, removed := validators.Remove(address)
			if !removed {
				return errors.New(Fmt("Failed to remove validator %X", address))
			}
		} else {
			// update val
			val.VotingPower = power
			updated := validators.Update(val)
			if !updated {
				return errors.New(Fmt("Failed to update validator %X with voting power %d", address, power))
			}
		}
	}
	return nil
}

// return a bit array of validators that signed the last commit
// NOTE: assumes commits have already been authenticated
func commitBitArrayFromBlock(block *types.Block) *BitArray {
	signed := NewBitArray(len(block.LastCommit.Precommits))
	for i, precommit := range block.LastCommit.Precommits {
		if precommit != nil {
			signed.SetIndex(i, true) // val_.LastCommitHeight = block.Height - 1
		}
	}
	return signed
}

//-----------------------------------------------------
// Validate block

func (s *State) ValidateBlock(block *types.Block) error {
	return s.validateBlock(block)
}

func (s *State) validateBlock(block *types.Block) error {
	// Basic block validation.
	err := block.ValidateBasic(s.ChainID, s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime, s.AppHash)
	err := block.ValidateBasic(s.ChainID, s.LastBlockHeight, s.LastBlockID, s.LastBlockTime, s.AppHash)
	if err != nil {
		return err
	}
@@ -117,11 +211,11 @@ func (s *State) validateBlock(block *types.Block) error {
	}
	} else {
		if len(block.LastCommit.Precommits) != s.LastValidators.Size() {
			return fmt.Errorf("Invalid block commit size. Expected %v, got %v",
				s.LastValidators.Size(), len(block.LastCommit.Precommits))
			return errors.New(Fmt("Invalid block commit size. Expected %v, got %v",
				s.LastValidators.Size(), len(block.LastCommit.Precommits)))
		}
		err := s.LastValidators.VerifyCommit(
			s.ChainID, s.LastBlockHash, s.LastBlockParts, block.Height-1, block.LastCommit)
			s.ChainID, s.LastBlockID, block.Height-1, block.LastCommit)
		if err != nil {
			return err
		}
@@ -130,41 +224,214 @@ func (s *State) validateBlock(block *types.Block) error {
	return nil
}

// Updates the LastCommitHeight of the validators in valSet, in place.
// Assumes that lastValSet matches the valset of block.LastCommit
// CONTRACT: lastValSet is not mutated.
func updateValidatorsWithBlock(lastValSet *types.ValidatorSet, valSet *types.ValidatorSet, block *types.Block) {
//-----------------------------------------------------------------------------
// ApplyBlock executes the block, then commits and updates the mempool atomically

	for i, precommit := range block.LastCommit.Precommits {
		if precommit == nil {
			continue
		}
		_, val := lastValSet.GetByIndex(i)
		if val == nil {
			PanicCrisis(Fmt("Failed to fetch validator at index %v", i))
		}
		if _, val_ := valSet.GetByAddress(val.Address); val_ != nil {
			val_.LastCommitHeight = block.Height - 1
			updated := valSet.Update(val_)
			if !updated {
				PanicCrisis("Failed to update validator LastCommitHeight")
			}
		} else {
			// XXX This is not an error if validator was removed.
			// But, we don't mutate validators yet so go ahead and panic.
			PanicCrisis("Could not find validator")
		}
// Execute and commit block against app, save block and state
func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus,
	block *types.Block, partsHeader types.PartSetHeader, mempool Mempool) error {

	// Run the block on the State:
	// + update validator sets
	// + run txs on the proxyAppConn
	err := s.ExecBlock(eventCache, proxyAppConn, block, partsHeader)
	if err != nil {
		return errors.New(Fmt("Exec failed for application: %v", err))
	}

	// lock mempool, commit state, update mempool
	err = s.CommitStateUpdateMempool(proxyAppConn, block, mempool)
	if err != nil {
		return errors.New(Fmt("Commit failed for application: %v", err))
	}
	return nil
}

//-----------------------------------------------------------------------------
// mempool must be locked during commit and update
// because state is typically reset on Commit and old txs must be replayed
// against committed state before new txs are run in the mempool, lest they be invalid
func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, block *types.Block, mempool Mempool) error {
	mempool.Lock()
	defer mempool.Unlock()

type InvalidTxError struct {
	Tx   types.Tx
	Code tmsp.CodeType
	// Commit block, get hash back
	res := proxyAppConn.CommitSync()
	if res.IsErr() {
		log.Warn("Error in proxyAppConn.CommitSync", "error", res)
		return res
	}
	if res.Log != "" {
		log.Debug("Commit.Log: " + res.Log)
	}

	// Set the state's new AppHash
	s.AppHash = res.Data

	// Update mempool.
	mempool.Update(block.Height, block.Txs)

	return nil
}

func (txErr InvalidTxError) Error() string {
	return Fmt("Invalid tx: [%v] code: [%v]", txErr.Tx, txErr.Code)
// Updates to the mempool need to be synchronized with committing a block
// so apps can reset their transient state on Commit
type Mempool interface {
	Lock()
	Unlock()
	Update(height int, txs []types.Tx)
}

type MockMempool struct {
}

func (m MockMempool) Lock()                             {}
func (m MockMempool) Unlock()                           {}
func (m MockMempool) Update(height int, txs []types.Tx) {}

//----------------------------------------------------------------
// Handshake with app to sync to latest state of core by replaying blocks

// TODO: Should we move blockchain/store.go to its own package?
type BlockStore interface {
	Height() int
	LoadBlock(height int) *types.Block
	LoadBlockMeta(height int) *types.BlockMeta
}

type Handshaker struct {
	config cfg.Config
	state  *State
	store  BlockStore

	nBlocks int // number of blocks applied to the state
}

func NewHandshaker(config cfg.Config, state *State, store BlockStore) *Handshaker {
	return &Handshaker{config, state, store, 0}
}

// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
	// handshake is done via info request on the query conn
	res, err := proxyApp.Query().InfoSync()
	if err != nil {
		return errors.New(Fmt("Error calling Info: %v", err))
	}

	blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
	appHash := res.LastBlockAppHash

	log.Notice("ABCI Handshake", "appHeight", blockHeight, "appHash", appHash)

	// TODO: check version

	// replay blocks up to the latest in the blockstore
	err = h.ReplayBlocks(appHash, blockHeight, proxyApp.Consensus())
	if err != nil {
		return errors.New(Fmt("Error on replay: %v", err))
	}

	// Save the state
	h.state.Save()

	// TODO: (on restart) replay mempool

	return nil
}

// Replay all blocks after blockHeight and ensure the result matches the current state.
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, appConnConsensus proxy.AppConnConsensus) error {

	storeBlockHeight := h.store.Height()
	stateBlockHeight := h.state.LastBlockHeight
	log.Notice("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)

	if storeBlockHeight == 0 {
		return nil
	} else if storeBlockHeight < appBlockHeight {
		// if the app is ahead, there's nothing we can do
		return ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}

	} else if storeBlockHeight == appBlockHeight {
		// We ran Commit, but if we crashed before state.Save(),
		// load the intermediate state and update the state.AppHash.
		// NOTE: If ABCI allowed rollbacks, we could just replay the
		// block even though it's been committed
		stateAppHash := h.state.AppHash
		lastBlockAppHash := h.store.LoadBlock(storeBlockHeight).AppHash

		if bytes.Equal(stateAppHash, appHash) {
			// we're all synced up
			log.Debug("ABCI ReplayBlocks: Already synced")
		} else if bytes.Equal(stateAppHash, lastBlockAppHash) {
			// we crashed after commit and before saving state,
			// so load the intermediate state and update the hash
			h.state.LoadIntermediate()
			h.state.AppHash = appHash
			log.Debug("ABCI ReplayBlocks: Loaded intermediate state and updated state.AppHash")
		} else {
			PanicSanity(Fmt("Unexpected state.AppHash: state.AppHash %X; app.AppHash %X, lastBlock.AppHash %X", stateAppHash, appHash, lastBlockAppHash))

		}
		return nil

	} else if storeBlockHeight == appBlockHeight+1 &&
		storeBlockHeight == stateBlockHeight+1 {
		// We crashed after saving the block
		// but before Commit (both the state and app are behind),
		// so just replay the block

		// check that the lastBlock.AppHash matches the state apphash
		block := h.store.LoadBlock(storeBlockHeight)
		if !bytes.Equal(block.Header.AppHash, appHash) {
			return ErrLastStateMismatch{storeBlockHeight, block.Header.AppHash, appHash}
		}

		blockMeta := h.store.LoadBlockMeta(storeBlockHeight)

		h.nBlocks += 1
		var eventCache types.Fireable // nil

		// replay the latest block
		return h.state.ApplyBlock(eventCache, appConnConsensus, block, blockMeta.PartsHeader, MockMempool{})
	} else if storeBlockHeight != stateBlockHeight {
		// unless we failed before committing or saving state (previous 2 cases),
		// the store and state should be at the same height!
		PanicSanity(Fmt("Expected storeHeight (%d) and stateHeight (%d) to match.", storeBlockHeight, stateBlockHeight))
	} else {
		// store is more than one ahead,
		// so app wants to replay many blocks

		// replay all blocks starting with appBlockHeight+1
		var eventCache types.Fireable // nil

		// TODO: use stateBlockHeight instead and let the consensus state
		// do the replay

		var appHash []byte
		for i := appBlockHeight + 1; i <= storeBlockHeight; i++ {
			h.nBlocks += 1
			block := h.store.LoadBlock(i)
			_, err := execBlockOnProxyApp(eventCache, appConnConsensus, block)
			if err != nil {
				log.Warn("Error executing block on proxy app", "height", i, "err", err)
				return err
			}
			// Commit block, get hash back
			res := appConnConsensus.CommitSync()
			if res.IsErr() {
				log.Warn("Error in proxyAppConn.CommitSync", "error", res)
				return res
			}
			if res.Log != "" {
				log.Info("Commit.Log: " + res.Log)
			}
			appHash = res.Data
		}
		if !bytes.Equal(h.state.AppHash, appHash) {
			return errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash))
		}
		return nil
	}
	return nil
}
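
For readers skimming the diff, the height comparisons in Handshaker.ReplayBlocks above reduce to the following cases (a paraphrase of the code, not new behavior):

// storeHeight == 0                            -> fresh chain, nothing to replay
// storeHeight <  appHeight                    -> ErrAppBlockHeightTooHigh (app ahead of core, unrecoverable)
// storeHeight == appHeight                    -> maybe crashed between Commit and state.Save();
//                                                reload intermediate state / fix up state.AppHash
// storeHeight == appHeight+1 == stateHeight+1 -> replay only the latest block via ApplyBlock
// storeHeight >  appHeight+1, store == state  -> re-exec and commit blocks appHeight+1..storeHeight
// anything else                               -> PanicSanity (store and state heights must match)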

state/execution_test.go | 210 (new file)
@@ -0,0 +1,210 @@
package state

import (
	"bytes"
	"fmt"
	"path"
	"testing"

	"github.com/tendermint/tendermint/config/tendermint_test"
	// . "github.com/tendermint/go-common"
	cfg "github.com/tendermint/go-config"
	"github.com/tendermint/go-crypto"
	dbm "github.com/tendermint/go-db"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/abci/example/dummy"
)

var (
	privKey      = crypto.GenPrivKeyEd25519FromSecret([]byte("handshake_test"))
	chainID      = "handshake_chain"
	nBlocks      = 5
	mempool      = MockMempool{}
	testPartSize = 65536
)

//---------------------------------------
// Test block execution

func TestExecBlock(t *testing.T) {
	// TODO
}

//---------------------------------------
// Test handshake/replay

// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	testHandshakeReplay(t, 0)
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	testHandshakeReplay(t, 1)
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	testHandshakeReplay(t, nBlocks-1)
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
	testHandshakeReplay(t, nBlocks)
}

// Make some blocks. Start a fresh app and apply n blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, n int) {
	config := tendermint_test.ResetConfig("proxy_test_")

	state, store := stateAndStore(config)
	clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1")))
	clientCreator2 := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2")))
	proxyApp := proxy.NewAppConns(config, clientCreator, NewHandshaker(config, state, store))
	if _, err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}
	chain := makeBlockchain(t, proxyApp, state)
	store.chain = chain //
	latestAppHash := state.AppHash
	proxyApp.Stop()

	if n > 0 {
		// start a new app without handshake, play n blocks
		proxyApp = proxy.NewAppConns(config, clientCreator2, nil)
		if _, err := proxyApp.Start(); err != nil {
			t.Fatalf("Error starting proxy app connections: %v", err)
		}
		state2, _ := stateAndStore(config)
		for i := 0; i < n; i++ {
			block := chain[i]
			err := state2.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
			if err != nil {
				t.Fatal(err)
			}
		}
		proxyApp.Stop()
	}

	// now start it with the handshake
	handshaker := NewHandshaker(config, state, store)
	proxyApp = proxy.NewAppConns(config, clientCreator2, handshaker)
	if _, err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
	}

	// get the latest app hash from the app
	res, err := proxyApp.Query().InfoSync()
	if err != nil {
		t.Fatal(err)
	}

	// the app hash should be synced up
	if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
		t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
	}

	if handshaker.nBlocks != nBlocks-n {
		t.Fatalf("Expected handshake to sync %d blocks, got %d", nBlocks-n, handshaker.nBlocks)
	}

}

//--------------------------
// utils for making blocks

// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
	}
	return txs
}

// sign a commit vote
func signCommit(height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
	vote := &types.Vote{
		ValidatorIndex:   0,
		ValidatorAddress: privKey.PubKey().Address(),
		Height:           height,
		Round:            round,
		Type:             types.VoteTypePrecommit,
		BlockID:          types.BlockID{hash, header},
	}

	sig := privKey.Sign(types.SignBytes(chainID, vote))
	vote.Signature = sig
	return vote
}

// make a blockchain with one validator
func makeBlockchain(t *testing.T, proxyApp proxy.AppConns, state *State) (blockchain []*types.Block) {

	prevHash := state.LastBlockID.Hash
	lastCommit := new(types.Commit)
	prevParts := types.PartSetHeader{}
	valHash := state.Validators.Hash()
	prevBlockID := types.BlockID{prevHash, prevParts}

	for i := 1; i < nBlocks+1; i++ {
		block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
			prevBlockID, valHash, state.AppHash, testPartSize)
		fmt.Println(i)
		fmt.Println(prevBlockID)
		fmt.Println(block.LastBlockID)
		err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
		if err != nil {
			t.Fatal(i, err)
		}

		voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
		vote := signCommit(i, 0, block.Hash(), parts.Header())
		_, err = voteSet.AddVote(vote)
		if err != nil {
			t.Fatal(err)
		}

		blockchain = append(blockchain, block)
		prevHash = block.Hash()
		prevParts = parts.Header()
		lastCommit = voteSet.MakeCommit()
		prevBlockID = types.BlockID{prevHash, prevParts}
	}
	return blockchain
}

// fresh state and mock store
func stateAndStore(config cfg.Config) (*State, *mockBlockStore) {
	stateDB := dbm.NewMemDB()
	return MakeGenesisState(stateDB, &types.GenesisDoc{
		ChainID: chainID,
		Validators: []types.GenesisValidator{
			types.GenesisValidator{privKey.PubKey(), 10000, "test"},
		},
		AppHash: nil,
	}), NewMockBlockStore(config, nil)
}

//----------------------------------
// mock block store

type mockBlockStore struct {
	config cfg.Config
	chain  []*types.Block
}

func NewMockBlockStore(config cfg.Config, chain []*types.Block) *mockBlockStore {
	return &mockBlockStore{config, chain}
}

func (bs *mockBlockStore) Height() int                       { return len(bs.chain) }
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
	block := bs.chain[height-1]
	return &types.BlockMeta{
		Hash:        block.Hash(),
		Header:      block.Header,
		PartsHeader: block.MakePartSet(bs.config.GetInt("block_part_size")).Header(),
	}
}

state/state.go | 110
@@ -7,35 +7,47 @@ import (
	"time"

	. "github.com/tendermint/go-common"
	cfg "github.com/tendermint/go-config"
	dbm "github.com/tendermint/go-db"
	"github.com/tendermint/go-wire"
	"github.com/tendermint/tendermint/types"
)

var (
	stateKey = []byte("stateKey")
	stateKey             = []byte("stateKey")
	stateIntermediateKey = []byte("stateIntermediateKey")
)

//-----------------------------------------------------------------------------

// NOTE: not goroutine-safe.
type State struct {
	mtx sync.Mutex
	db  dbm.DB
	GenesisDoc *types.GenesisDoc
	ChainID    string
	// mtx for writing to db
	mtx sync.Mutex
	db  dbm.DB

	// should not change
	GenesisDoc *types.GenesisDoc
	ChainID    string

	// updated at end of ExecBlock
	LastBlockHeight int // Genesis state has this set to 0. So, Block(H=0) does not exist.
	LastBlockHash   []byte
	LastBlockParts  types.PartSetHeader
	LastBlockID     types.BlockID
	LastBlockTime   time.Time
	Validators      *types.ValidatorSet
	LastValidators  *types.ValidatorSet
	AppHash         []byte
	LastValidators  *types.ValidatorSet // block.LastCommit validated against this

	// AppHash is updated after Commit
	AppHash []byte
}

func LoadState(db dbm.DB) *State {
	return loadState(db, stateKey)
}

func loadState(db dbm.DB, key []byte) *State {
	s := &State{db: db}
	buf := db.Get(stateKey)
	buf := db.Get(key)
	if len(buf) == 0 {
		return nil
	} else {
@@ -56,8 +68,7 @@ func (s *State) Copy() *State {
		GenesisDoc:      s.GenesisDoc,
		ChainID:         s.ChainID,
		LastBlockHeight: s.LastBlockHeight,
		LastBlockHash:   s.LastBlockHash,
		LastBlockParts:  s.LastBlockParts,
		LastBlockID:     s.LastBlockID,
		LastBlockTime:   s.LastBlockTime,
		Validators:      s.Validators.Copy(),
		LastValidators:  s.LastValidators.Copy(),
@@ -68,13 +79,83 @@ func (s *State) Copy() *State {
func (s *State) Save() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.db.SetSync(stateKey, s.Bytes())
}

func (s *State) SaveIntermediate() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.db.SetSync(stateIntermediateKey, s.Bytes())
}

// Load the intermediate state into the current state
// and do some sanity checks
func (s *State) LoadIntermediate() {
	s2 := loadState(s.db, stateIntermediateKey)
	if s.ChainID != s2.ChainID {
		PanicSanity(Fmt("State mismatch for ChainID. Got %v, Expected %v", s2.ChainID, s.ChainID))
	}

	if s.LastBlockHeight+1 != s2.LastBlockHeight {
		PanicSanity(Fmt("State mismatch for LastBlockHeight. Got %v, Expected %v", s2.LastBlockHeight, s.LastBlockHeight+1))
	}

	if !bytes.Equal(s.Validators.Hash(), s2.LastValidators.Hash()) {
		PanicSanity(Fmt("State mismatch for LastValidators. Got %X, Expected %X", s2.LastValidators.Hash(), s.Validators.Hash()))
	}

	if !bytes.Equal(s.AppHash, s2.AppHash) {
		PanicSanity(Fmt("State mismatch for AppHash. Got %X, Expected %X", s2.AppHash, s.AppHash))
	}

	s.setBlockAndValidators(s2.LastBlockHeight, s2.LastBlockID, s2.LastBlockTime, s2.Validators.Copy(), s2.LastValidators.Copy())
}

func (s *State) Equals(s2 *State) bool {
	return bytes.Equal(s.Bytes(), s2.Bytes())
}

func (s *State) Bytes() []byte {
	buf, n, err := new(bytes.Buffer), new(int), new(error)
	wire.WriteBinary(s, buf, n, err)
	if *err != nil {
		PanicCrisis(*err)
	}
	s.db.Set(stateKey, buf.Bytes())
	return buf.Bytes()
}

// Mutate state variables to match block and validators
// after running EndBlock
func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, prevValSet, nextValSet *types.ValidatorSet) {
	s.setBlockAndValidators(header.Height,
		types.BlockID{header.Hash(), blockPartsHeader}, header.Time,
		prevValSet, nextValSet)
}

func (s *State) setBlockAndValidators(
	height int, blockID types.BlockID, blockTime time.Time,
	prevValSet, nextValSet *types.ValidatorSet) {

	s.LastBlockHeight = height
	s.LastBlockID = blockID
	s.LastBlockTime = blockTime
	s.Validators = nextValSet
	s.LastValidators = prevValSet
}

func (s *State) GetValidators() (*types.ValidatorSet, *types.ValidatorSet) {
	return s.LastValidators, s.Validators
}

// Load the most recent state from "state" db,
// or create a new one (and save) from genesis.
func GetState(config cfg.Config, stateDB dbm.DB) *State {
	state := LoadState(stateDB)
	if state == nil {
		state = MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
		state.Save()
	}
	return state
}

//-----------------------------------------------------------------------------
@@ -117,8 +198,7 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State {
		GenesisDoc:      genDoc,
		ChainID:         genDoc.ChainID,
		LastBlockHeight: 0,
		LastBlockHash:   nil,
		LastBlockParts:  types.PartSetHeader{},
		LastBlockID:     types.BlockID{},
		LastBlockTime:   genDoc.GenesisTime,
		Validators:      types.NewValidatorSet(validators),
		LastValidators:  types.NewValidatorSet(nil),

state/state_test.go | 42 (new file)
@@ -0,0 +1,42 @@
package state

import (
	"testing"

	dbm "github.com/tendermint/go-db"
	"github.com/tendermint/tendermint/config/tendermint_test"
)

func TestStateCopyEquals(t *testing.T) {
	config := tendermint_test.ResetConfig("state_")
	// Get State db
	stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
	state := GetState(config, stateDB)

	stateCopy := state.Copy()

	if !state.Equals(stateCopy) {
		t.Fatalf("expected state and its copy to be identical. got %v\n expected %v\n", stateCopy, state)
	}

	stateCopy.LastBlockHeight += 1

	if state.Equals(stateCopy) {
		t.Fatalf("expected states to be different. got same %v", state)
	}
}

func TestStateSaveLoad(t *testing.T) {
	config := tendermint_test.ResetConfig("state_")
	// Get State db
	stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
	state := GetState(config, stateDB)

	state.LastBlockHeight += 1
	state.Save()

	loadedState := LoadState(stateDB)
	if !state.Equals(loadedState) {
		t.Fatalf("expected state and its copy to be identical. got %v\n expected %v\n", loadedState, state)
	}
}

@@ -1,3 +1,4 @@
killall tendermint
killall dummy
killall counter
rm -rf ~/.tendermint_app

@@ -1,5 +1,11 @@
#! /bin/bash

if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
	GRPC_BROADCAST_TX=""
fi

set -u

#####################
# counter over socket
#####################
@@ -7,62 +13,105 @@ TESTNAME=$1

# Send some txs

function getCode() {
	R=$1
	if [[ "$R" == "{}" ]]; then
		# protobuf auto adds `omitempty` to everything so code OK and empty data/log
		# will not even show when marshalled into json
		# apparently we can use github.com/golang/protobuf/jsonpb to do the marshalling ...
		echo 0
	else
		# this won't actually work if there's an error ...
		echo "$R" | jq .code
	fi
}
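
A quick worked example of getCode's two paths (the inputs are hypothetical responses, results shown as comments):

getCode '{}'                          # -> 0 (omitempty hid code=0 and the empty data/log)
getCode '{"code":1,"log":"bad tx"}'   # -> 1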
function sendTx() {
|
||||
TX=$1
|
||||
RESPONSE=`curl -s localhost:46657/broadcast_tx_commit?tx=\"$TX\"`
|
||||
CODE=`echo $RESPONSE | jq .result[1].code`
|
||||
ERROR=`echo $RESPONSE | jq .error`
|
||||
ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes
|
||||
if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
|
||||
RESPONSE=`curl -s localhost:46657/broadcast_tx_commit?tx=0x$TX`
|
||||
ERROR=`echo $RESPONSE | jq .error`
|
||||
ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes
|
||||
|
||||
RESPONSE=`echo $RESPONSE | jq .result[1]`
|
||||
else
|
||||
if [ -f grpc_client ]; then
|
||||
rm grpc_client
|
||||
fi
|
||||
echo "... building grpc_client"
|
||||
go build -o grpc_client grpc_client.go
|
||||
RESPONSE=`./grpc_client $TX`
|
||||
ERROR=""
|
||||
fi
|
||||
|
||||
echo "RESPONSE"
|
||||
echo $RESPONSE
|
||||
|
||||
echo $RESPONSE | jq . &> /dev/null
|
||||
IS_JSON=$?
|
||||
if [[ "$IS_JSON" != "0" ]]; then
|
||||
ERROR="$RESPONSE"
|
||||
fi
|
||||
APPEND_TX_RESPONSE=`echo $RESPONSE | jq .deliver_tx`
|
||||
APPEND_TX_CODE=`getCode "$APPEND_TX_RESPONSE"`
|
||||
CHECK_TX_RESPONSE=`echo $RESPONSE | jq .check_tx`
|
||||
CHECK_TX_CODE=`getCode "$CHECK_TX_RESPONSE"`
|
||||
|
||||
echo "-------"
|
||||
echo "TX $TX"
|
||||
echo "RESPONSE $RESPONSE"
|
||||
echo "ERROR $ERROR"
|
||||
echo "----"
|
||||
|
||||
if [[ "$ERROR" != "" ]]; then
|
||||
echo "Unexpected error sending tx ($TX): $ERROR"
|
||||
exit 1
|
||||
fi
|
||||
}

echo "... sending tx. expect no error"

# 0 should pass once and get in block, with no error
TX=00
sendTx $TX
-if [[ $CODE != 0 ]]; then
+if [[ $APPEND_TX_CODE != 0 ]]; then
    echo "Got non-zero exit code for $TX. $RESPONSE"
    exit 1
fi
-if [[ "$ERROR" != "" ]]; then
-    echo "Unexpected error. Tx $TX should have been included in a block. $ERROR"
-    exit 1
-fi

echo "... sending tx. expect error"

# second time should get rejected by the mempool (return error and non-zero code)
sendTx $TX
-if [[ $CODE == 0 ]]; then
+echo "CHECKTX CODE: $CHECK_TX_CODE"
+if [[ "$CHECK_TX_CODE" == 0 ]]; then
    echo "Got zero exit code for $TX. Expected tx to be rejected by mempool. $RESPONSE"
    exit 1
fi
-if [[ "$ERROR" == "" ]]; then
-    echo "Expected to get an error - tx $TX should have been rejected from mempool"
-    echo "$RESPONSE"
-    exit 1
-fi

echo "... sending tx. expect no error"

# now, TX=01 should pass, with no error
TX=01
sendTx $TX
-if [[ $CODE != 0 ]]; then
+if [[ $APPEND_TX_CODE != 0 ]]; then
    echo "Got non-zero exit code for $TX. $RESPONSE"
    exit 1
fi
-if [[ "$ERROR" != "" ]]; then
-    echo "Unexpected error. Tx $TX should have been accepted in block. $ERROR"
-    exit 1
-fi

echo "... sending tx. expect no error, but invalid"

# now, TX=03 should get in a block (passes CheckTx, no error), but is invalid
TX=03
sendTx $TX
-if [[ $CODE == 0 ]]; then
-    echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE"
+if [[ "$CHECK_TX_CODE" != 0 ]]; then
+    echo "Got non-zero exit code for checktx on $TX. $RESPONSE"
    exit 1
fi
-if [[ "$ERROR" != "" ]]; then
-    echo "Unexpected error. Tx $TX should have been included in a block. $ERROR"
+if [[ $APPEND_TX_CODE == 0 ]]; then
+    echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE"
    exit 1
fi
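Taken together, the four cases split what the old single CODE check conflated: CHECK_TX_CODE reflects mempool admission (the replayed 00 must fail there), while APPEND_TX_CODE reflects execution in a block (03 is admitted but must fail there, since the serial counter expects 02 next). Note also the recurring format change in this diff: the RPC's tx argument moves from a quoted string (tx=\"$TX\") to 0x-prefixed hex (tx=0x$TX).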
@@ -2,7 +2,8 @@
set -e

function toHex() {
-    echo -n $1 | hexdump -ve '1/1 "%.2X"'
+    echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}'
}
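For instance, with the new toHex above, the key/value pair used below encodes as:

    toHex "abcd=dcba"   # -> 0x616263643D64636261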

#####################

@@ -13,20 +14,23 @@ TESTNAME=$1
# store key value pair
KEY="abcd"
VALUE="dcba"
-curl -s 127.0.0.1:46657/broadcast_tx_commit?tx=\"$(toHex $KEY=$VALUE)\"
+echo $(toHex $KEY=$VALUE)
+curl -s 127.0.0.1:46657/broadcast_tx_commit?tx=$(toHex $KEY=$VALUE)
+echo $?
echo ""

###########################
-# test using the tmsp-cli
+# test using the abci-cli
###########################

+echo "... testing query with abci-cli"
+
# we should be able to look up the key
-RESPONSE=`tmsp-cli query $KEY`
+RESPONSE=`abci-cli query \"$KEY\"`

set +e
-A=`echo $RESPONSE | grep exists=true`
+A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? != 0 ]]; then
    echo "Failed to find 'exists=true' for $KEY. Response:"
    echo "$RESPONSE"
@@ -35,9 +39,9 @@ fi
set -e

# we should not be able to look up the value
-RESPONSE=`tmsp-cli query $VALUE`
+RESPONSE=`abci-cli query \"$VALUE\"`
set +e
-A=`echo $RESPONSE | grep exists=true`
+A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? == 0 ]]; then
    echo "Found 'exists=true' for $VALUE when we should not have. Response:"
    echo "$RESPONSE"
@@ -46,15 +50,17 @@ fi
set -e

#############################
-# test using the /tmsp_query
+# test using the /abci_query
#############################

+echo "... testing query with /abci_query"
+
# we should be able to look up the key
-RESPONSE=`curl -s 127.0.0.1:46657/tmsp_query?query=\"$(toHex $KEY)\"`
+RESPONSE=`curl -s 127.0.0.1:46657/abci_query?query=$(toHex $KEY)`
RESPONSE=`echo $RESPONSE | jq .result[1].result.Data | xxd -r -p`

set +e
-A=`echo $RESPONSE | grep exists=true`
+A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? != 0 ]]; then
    echo "Failed to find 'exists=true' for $KEY. Response:"
    echo "$RESPONSE"
@@ -63,10 +69,10 @@ fi
set -e

# we should not be able to look up the value
-RESPONSE=`curl -s 127.0.0.1:46657/tmsp_query?query=\"$(toHex $VALUE)\"`
+RESPONSE=`curl -s 127.0.0.1:46657/abci_query?query=\"$(toHex $VALUE)\"`
RESPONSE=`echo $RESPONSE | jq .result[1].result.Data | xxd -r -p`
set +e
-A=`echo $RESPONSE | grep exists=true`
+A=`echo $RESPONSE | grep '"exists":true'`
if [[ $? == 0 ]]; then
    echo "Found 'exists=true' for $VALUE when we should not have. Response:"
    echo "$RESPONSE"
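A note on the /abci_query pipeline above: the value comes back hex-encoded in .result[1].result.Data, and xxd -r -p reverses a plain hex dump back into raw bytes, e.g.:

    echo 64636261 | xxd -r -p   # -> dcba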
test/app/grpc_client.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package main

import (
    "encoding/hex"
    "fmt"
    "os"

    "golang.org/x/net/context"

    "github.com/tendermint/go-wire"
    "github.com/tendermint/tendermint/rpc/grpc"
)

var grpcAddr = "tcp://localhost:36656"

func main() {
    args := os.Args
    if len(args) == 1 {
        fmt.Println("Must enter a transaction to send (hex)")
        os.Exit(1)
    }
    tx := args[1]
    txBytes, err := hex.DecodeString(tx)
    if err != nil {
        fmt.Println("Invalid hex", err)
        os.Exit(1)
    }

    clientGRPC := core_grpc.StartGRPCClient(grpcAddr)
    res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{txBytes})
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    fmt.Println(string(wire.JSONBytes(res)))
}
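Note the rpc/grpc import is used under its package name, core_grpc. counter_test.sh builds and runs this client as shown earlier; standalone (assuming a node started with --grpc_laddr tcp://localhost:36656) the equivalent would be:

    go build -o grpc_client grpc_client.go
    ./grpc_client 00   # hex for a single zero byte; prints the BroadcastTx result as JSON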
@@ -13,7 +13,7 @@ export TMROOT=$HOME/.tendermint_app
function dummy_over_socket(){
    rm -rf $TMROOT
    tendermint init
-    echo "Starting dummy and tendermint"
+    echo "Starting dummy_over_socket"
    dummy > /dev/null &
    pid_dummy=$!
    tendermint node > tendermint.log &
@@ -30,7 +30,7 @@ function dummy_over_socket(){
function dummy_over_socket_reorder(){
    rm -rf $TMROOT
    tendermint init
-    echo "Starting tendermint and dummy"
+    echo "Starting dummy_over_socket_reorder (ie. start tendermint first)"
    tendermint node > tendermint.log &
    pid_tendermint=$!
    sleep 2
@@ -48,7 +48,7 @@ function dummy_over_socket_reorder(){
function counter_over_socket() {
    rm -rf $TMROOT
    tendermint init
-    echo "Starting counter and tendermint"
+    echo "Starting counter_over_socket"
    counter --serial > /dev/null &
    pid_counter=$!
    tendermint node > tendermint.log &
@@ -64,10 +64,10 @@ function counter_over_socket() {
function counter_over_grpc() {
    rm -rf $TMROOT
    tendermint init
-    echo "Starting counter and tendermint"
-    counter --serial --tmsp grpc > /dev/null &
+    echo "Starting counter_over_grpc"
+    counter --serial --abci grpc > /dev/null &
    pid_counter=$!
-    tendermint node --tmsp grpc > tendermint.log &
+    tendermint node --abci grpc > tendermint.log &
    pid_tendermint=$!
    sleep 5
@@ -77,6 +77,24 @@ function counter_over_grpc() {
    kill -9 $pid_counter $pid_tendermint
}

+function counter_over_grpc_grpc() {
+    rm -rf $TMROOT
+    tendermint init
+    echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)"
+    counter --serial --abci grpc > /dev/null &
+    pid_counter=$!
+    sleep 1
+    GRPC_PORT=36656
+    tendermint node --abci grpc --grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log &
+    pid_tendermint=$!
+    sleep 5
+
+    echo "running test"
+    GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx"
+
+    kill -9 $pid_counter $pid_tendermint
+}
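Note that GRPC_PORT here (36656) must agree with the grpcAddr constant hard-coded in test/app/grpc_client.go above, since counter_test.sh run with GRPC_BROADCAST_TX=true shells out to that client.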

cd $GOPATH/src/github.com/tendermint/tendermint/test/app

case "$1" in
@@ -92,6 +110,9 @@ case "$1" in
"counter_over_grpc")
    counter_over_grpc
    ;;
+"counter_over_grpc_grpc")
+    counter_over_grpc_grpc
+    ;;
*)
    echo "Running all"
    dummy_over_socket
@@ -101,5 +122,7 @@ case "$1" in
    counter_over_socket
    echo ""
    counter_over_grpc
+    echo ""
+    counter_over_grpc_grpc
esac
@@ -6,6 +6,8 @@ RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        jq bsdmainutils vim-common psmisc

+# Setup tendermint repo with vendored dependencies but without code;
+# docker's layer cache then skips reinstalling deps when only code changes
ENV REPO $GOPATH/src/github.com/tendermint/tendermint
WORKDIR $REPO
ADD glide.yaml glide.yaml
@@ -13,10 +15,14 @@ ADD glide.lock glide.lock
ADD Makefile Makefile
RUN make get_vendor_deps

+# Now copy in the code
+COPY . $REPO
+
RUN go install ./cmd/tendermint
-RUN bash scripts/install_tmsp_apps.sh
+RUN bash scripts/install_abci_apps.sh

+# expose the volume for debugging
+VOLUME $REPO
+
EXPOSE 46656
EXPOSE 46657
test/net/setup.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
#! /bin/bash
set -eu

# grab glide for dependency mgmt
go get github.com/Masterminds/glide

# grab network monitor, install mintnet, netmon
# these might err
echo "... fetching repos. ignore go get errors"
set +e
go get github.com/tendermint/network_testing
go get github.com/tendermint/mintnet
go get github.com/tendermint/netmon
set -e

# install vendored deps
echo "GOPATH $GOPATH"

cd $GOPATH/src/github.com/tendermint/mintnet
echo "... install mintnet dir $(pwd)"
glide install
go install
cd $GOPATH/src/github.com/tendermint/netmon
echo "... install netmon dir $(pwd)"
glide install
go install
test/net/start.sh (new file, 34 lines)
@@ -0,0 +1,34 @@
#! /bin/bash
set -eu

# start a testnet and benchmark throughput using mintnet+netmon via the network_testing repo

DATACENTER=single
VALSETSIZE=4
BLOCKSIZE=8092
TX_SIZE=200
NTXS=$((BLOCKSIZE*4))
RESULTSDIR=results
CLOUD_PROVIDER=digitalocean

set +u
if [[ "$MACH_PREFIX" == "" ]]; then
    MACH_PREFIX=mach
fi
set -u

export TMHEAD=`git rev-parse --abbrev-ref HEAD`
export TM_IMAGE="tendermint/tmbase"

cd $GOPATH/src/github.com/tendermint/network_testing
echo "... running network test $(pwd)"
bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER

# TODO: publish result!

# cleanup

echo "... destroying machines"
mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE]
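A typical invocation, assuming docker-machine credentials for the digitalocean account are already configured (MACH_PREFIX is optional and defaults to mach):

    MACH_PREFIX=bench1 bash test/net/start.sh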

@@ -1,44 +1,8 @@
#! /bin/bash
set -eu

-# start a testnet and benchmark throughput using mintnet+netmon via the network_testing repo
-
-DATACENTER=single
-VALSETSIZE=4
-BLOCKSIZE=8092
-TX_SIZE=200
-NTXS=$((BLOCKSIZE*4))
-RESULTSDIR=results
-CLOUD_PROVIDER=digitalocean
-
-set +u
-if [[ "$MACH_PREFIX" == "" ]]; then
-    MACH_PREFIX=mach
-fi
-set -u
-
-export TMHEAD=`git rev-parse --abbrev-ref HEAD`
-export TM_IMAGE="tendermint/tmbase"
-
-# not a go repo
-set +e
-go get github.com/tendermint/network_testing
-set -e
-
-# install mintnet, netmon
-# TODO: specify branch
-go get github.com/tendermint/mintnet
-go get github.com/tendermint/netmon
-
-cd $GOPATH/src/github.com/tendermint/network_testing
-bash experiments/exp_throughput.sh $DATACENTER $VALSETSIZE $BLOCKSIZE $TX_SIZE $NTXS $MACH_PREFIX $RESULTSDIR $CLOUD_PROVIDER
-
-# TODO: publish result!
-
-# cleanup
-mintnet destroy --machines $MACH_PREFIX[1-$VALSETSIZE]
+# install mintnet, netmon, fetch network_testing
+bash test/net/setup.sh
+
+# start the testnet
+bash test/net/start.sh
@@ -1,59 +1,28 @@
#! /bin/bash
set -u

+N=$1
+
###################################################################
-# wait for all peers to come online
+# assumes peers are already synced up
+# test sending txs
# for each peer:
-# wait to have 3 peers
-# wait to be at height > 1
# send a tx, wait for commit
# assert app hash on every peer reflects the post tx state
###################################################################

-N=4
-
-# wait for everyone to come online
-echo "Waiting for nodes to come online"
-for i in `seq 1 $N`; do
-    addr=$(test/p2p/ip.sh $i):46657
-    curl -s $addr/status > /dev/null
-    ERR=$?
-    while [ "$ERR" != 0 ]; do
-        sleep 1
-        curl -s $addr/status > /dev/null
-        ERR=$?
-    done
-    echo "... node $i is up"
-done
-
echo ""
# run the test on each of them
for i in `seq 1 $N`; do
    addr=$(test/p2p/ip.sh $i):46657

-    # - assert everyone has 3 other peers
-    N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
-    while [ "$N_PEERS" != 3 ]; do
-        echo "Waiting for node $i to connect to all peers ..."
-        sleep 1
-        N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
-    done
-
-    # - assert block height is greater than 1
-    BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
-    while [ "$BLOCK_HEIGHT" -le 1 ]; do
-        echo "Waiting for node $i to commit a block ..."
-        sleep 1
-        BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
-    done
-    echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT"
-
    # current state
    HASH1=`curl -s $addr/status | jq .result[1].latest_app_hash`

    # - send a tx
-    TX=\"aadeadbeefbeefbeef0$i\"
+    TX=aadeadbeefbeefbeef0$i
    echo "Broadcast Tx $TX"
-    curl -s $addr/broadcast_tx_commit?tx=$TX
+    curl -s $addr/broadcast_tx_commit?tx=0x$TX
    echo ""

    # we need to wait another block to get the new app_hash
test/p2p/basic/test.sh (new file, 53 lines)
@@ -0,0 +1,53 @@
#! /bin/bash
set -u

N=$1

###################################################################
# wait for all peers to come online
# for each peer:
# wait to have N-1 peers
# wait to be at height > 1
###################################################################

# wait for everyone to come online
echo "Waiting for nodes to come online"
for i in `seq 1 $N`; do
    addr=$(test/p2p/ip.sh $i):46657
    curl -s $addr/status > /dev/null
    ERR=$?
    while [ "$ERR" != 0 ]; do
        sleep 1
        curl -s $addr/status > /dev/null
        ERR=$?
    done
    echo "... node $i is up"
done

echo ""
# wait for each of them to sync up
for i in `seq 1 $N`; do
    addr=$(test/p2p/ip.sh $i):46657
    N_1=$(($N - 1))

    # - assert everyone has N-1 other peers
    N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
    while [ "$N_PEERS" != $N_1 ]; do
        echo "Waiting for node $i to connect to all peers ..."
        sleep 1
        N_PEERS=`curl -s $addr/net_info | jq '.result[1].peers | length'`
    done

    # - assert block height is greater than 1
    BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
    while [ "$BLOCK_HEIGHT" -le 1 ]; do
        echo "Waiting for node $i to commit a block ..."
        sleep 1
        BLOCK_HEIGHT=`curl -s $addr/status | jq .result[1].latest_block_height`
    done
    echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT"
done

echo ""
echo "PASS"
echo ""
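The poll-sleep-retry pattern appears several times across these scripts; a hypothetical helper (not part of the repo) that would factor it out:

    wait_until() {
        local desc=$1; shift
        until "$@" > /dev/null 2>&1; do
            echo "Waiting for $desc ..."
            sleep 1
        done
    }

    # e.g.: wait_until "node $i" curl -s $addr/status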
@@ -6,13 +6,14 @@ NETWORK_NAME=$2
ID=$3
CMD=$4

+NAME=test_container_$ID
+
echo "starting test client container with CMD=$CMD"
# run the test container on the local network
-docker run -t \
+docker run -t --rm \
    -v $GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p \
    --net=$NETWORK_NAME \
    --ip=$(test/p2p/ip.sh "-1") \
-    --name test_container_$ID \
+    --name $NAME \
    --entrypoint bash \
    $DOCKER_IMAGE $CMD
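--rm removes the finished test container automatically, which also frees the fixed --name for the next run; without it, a second run would fail with a name conflict.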

@@ -1,5 +1,5 @@
#! /bin/bash
-# This is a sample bash script for a TMSP application
+# This is a sample bash script for an ABCI application

cd app/
git clone https://github.com/tendermint/nomnomcoin.git

@@ -8,7 +8,7 @@ BRANCH="master"

go get -d $TMREPO/cmd/tendermint
### DEPENDENCIES (example)
-# cd $GOPATH/src/github.com/tendermint/tmsp
+# cd $GOPATH/src/github.com/tendermint/abci
# git fetch origin $BRANCH
# git checkout $BRANCH
### DEPENDENCIES END
test/p2p/fast_sync/check_peer.sh (new file, 43 lines)
@@ -0,0 +1,43 @@
#! /bin/bash
set -eu
set -o pipefail

ID=$1

###########################################
#
# Wait for peer to catch up to other peers
#
###########################################

addr=$(test/p2p/ip.sh $ID):46657
peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
peer_addr=$(test/p2p/ip.sh $peerID):46657

# get another peer's height
h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height`

# get another peer's state
root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash`

echo "Other peer is on height $h1 with state $root1"
echo "Waiting for peer $ID to catch up"

# wait for it to sync to past its previous height
set +e
set +o pipefail
h2="0"
while [[ "$h2" -lt "$(($h1+3))" ]]; do
    sleep 1
    h2=`curl -s $addr/status | jq .result[1].latest_block_height`
    echo "... $h2"
done

# check the app hash
root2=`curl -s $addr/status | jq .result[1].latest_app_hash`

if [[ "$root1" != "$root2" ]]; then
    echo "App hash after fast sync does not match. Got $root2; expected $root1"
    exit 1
fi
echo "... fast sync successful"
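The loop waits for h2 to pass h1+3 rather than just h1, so the check only succeeds once the restarted peer has both caught up to the other peer and kept committing new blocks past that point.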

@@ -1,44 +1,16 @@
#! /bin/bash
set -eu
-set -o pipefail

+###############################################################
+# for each peer:
+# kill peer
+# bring it back online via fast sync
+# check app hash
+###############################################################
+DOCKER_IMAGE=$1
+NETWORK_NAME=$2
+N=$3
+PROXY_APP=$4
+
-ID=$1
cd $GOPATH/src/github.com/tendermint/tendermint

-addr=$(test/p2p/ip.sh $ID):46657
-peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1
-peer_addr=$(test/p2p/ip.sh $peerID):46657
-
-# get another peer's height
-h1=`curl -s $peer_addr/status | jq .result[1].latest_block_height`
-
-# get another peer's state
-root1=`curl -s $peer_addr/status | jq .result[1].latest_app_hash`
-
-echo "Other peer is on height $h1 with state $root1"
-echo "Waiting for peer $ID to catch up"
-
-# wait for it to sync to past its previous height
-set +e
-set +o pipefail
-h2="0"
-while [[ "$h2" -lt "$(($h1+3))" ]]; do
-    sleep 1
-    h2=`curl -s $addr/status | jq .result[1].latest_block_height`
-    echo "... $h2"
+# run it on each of them
+for i in `seq 1 $N`; do
+    bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N $PROXY_APP
done

-# check the app hash
-root2=`curl -s $addr/status | jq .result[1].latest_app_hash`
-
-if [[ "$root1" != "$root2" ]]; then
-    echo "App hash after fast sync does not match. Got $root2; expected $root1"
-    exit 1
-fi
-echo "... fast sync successful"