Mirror of https://github.com/fluencelabs/tendermint (synced 2025-04-25 14:52:17 +00:00)
Merge branch 'develop' into jae/aminoify

Commit: c170800fbd
@@ -130,19 +130,6 @@ jobs:
      paths:
        - "profiles/*"

  test_libs:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: Run tests
          command: bash test/test_libs.sh

  test_persistence:
    <<: *defaults
    steps:
@@ -205,14 +192,6 @@ workflows:
      - test_cover:
          requires:
            - setup_dependencies
      - test_libs:
          filters:
            branches:
              only:
                - develop
                - master
          requires:
            - setup_dependencies
      - test_persistence:
          requires:
            - setup_abci
.gitignore (vendored): 4 changes

@@ -17,7 +17,11 @@ test/logs
coverage.txt
docs/_build
docs/tools
docs/abci-spec.rst
*.log

scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil

.idea/
*.iml
CHANGELOG.md: 64 changes

@@ -7,7 +7,6 @@ BREAKING CHANGES:
- Upgrade consensus for more real-time use of evidence

FEATURES:
- Peer reputation management
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
@@ -25,19 +24,66 @@ BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness

## 0.17.0 (TBD)
## 0.18.0 (April 6th, 2018)

BREAKING:
- [genesis] rename `app_options` to `app_state`

- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0)
- [types] ValidatorSet.GetByAddress returns -1 if no validator found
- [p2p] require all addresses come with an ID no matter what
- [rpc] Listening address must contain tcp:// or unix:// prefix

FEATURES:

- [rpc] StartHTTPAndTLSServer (not used yet)
- [rpc] Include validator's voting power in `/status`
- [rpc] `/tx` and `/tx_search` responses now include the transaction hash
- [rpc] Include peer NodeIDs in `/net_info`

IMPROVEMENTS:
- [config] exposed `auth_enc` flag to enable/disable encryption
- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address
- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the release after this one)
- [config] trim whitespace from elements of lists (like `persistent_peers`)
- [rpc] `/tx_search` results are sorted by height
- [p2p] do not try to connect to ourselves (ok, maybe only once)
- [p2p] seeds respond with a bias towards good peers

## 0.16.0 (February 20th, 2017)
BUG FIXES:
- [rpc] fix subscribing using an abci.ResponseDeliverTx tag
- [rpc] fix tx_indexers matchRange
- [rpc] fix unsubscribing (see tmlibs v0.8.0)

## 0.17.1 (March 27th, 2018)

BUG FIXES:
- [types] Actually support `app_state` in genesis as `AppStateJSON`

## 0.17.0 (March 27th, 2018)

BREAKING:
- [types] WriteSignBytes -> SignBytes

IMPROVEMENTS:
- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release)
- [docs] note on determinism (docs/determinism.rst)
- [genesis] `app_options` field is deprecated. please rename it to `app_state` in your genesis file(s). `app_options` will not work in the next breaking release
- [p2p] dial seeds directly without potential peers
- [p2p] exponential backoff for addrs in the address book
- [p2p] mark peer as good if it contributed enough votes or block parts
- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect
- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address
- [spec] various improvements
- switched from glide to dep internally for package management
- [wire] prep work for upgrading to new go-wire (which is now called go-amino)

FEATURES:
- [config] exposed `auth_enc` flag to enable/disable encryption
- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description)
- [rpc] added `/health` endpoint, which returns empty result for now
- [types/priv_validator] new format and socket client, allowing for remote signing

BUG FIXES:
- [consensus] fix liveness bug by introducing ValidBlock mechanism

## 0.16.0 (February 20th, 2018)

BREAKING CHANGES:
- [config] use $TMHOME/config for all config and json files
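The p2p entries above require every peer address to carry a node ID (`ID@host:port`). As a rough illustration of that address shape only, here is a small Go sketch; the helper name, error texts, and validation rules are assumptions for this example and are not the Tendermint p2p implementation.

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// splitPeerAddress is a hypothetical helper: it returns the node ID and the
// host:port parts of an "ID@host:port" string, or an error if the ID is missing.
func splitPeerAddress(addr string) (id, hostPort string, err error) {
	parts := strings.SplitN(addr, "@", 2)
	if len(parts) != 2 || parts[0] == "" {
		return "", "", fmt.Errorf("address %q has no node ID prefix", addr)
	}
	if _, _, err := net.SplitHostPort(parts[1]); err != nil {
		return "", "", fmt.Errorf("address %q has an invalid host:port: %v", addr, err)
	}
	return parts[0], parts[1], nil
}

func main() {
	id, hp, err := splitPeerAddress("f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656")
	fmt.Println(id, hp, err)
}
```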
|
@ -1,8 +1,8 @@
|
||||
FROM alpine:3.6
|
||||
FROM alpine:3.7
|
||||
|
||||
# This is the release of tendermint to pull in.
|
||||
ENV TM_VERSION 0.15.0
|
||||
ENV TM_SHA256SUM 71cc271c67eca506ca492c8b90b090132f104bf5dbfe0af2702a50886e88de17
|
||||
ENV TM_VERSION 0.17.1
|
||||
ENV TM_SHA256SUM d57008c63d2d9176861137e38ed203da486febf20ae7d388fb810a75afff8f24
|
||||
|
||||
# Tendermint will be looking for genesis file in /tendermint (unless you change
|
||||
# `genesis_file` in config.toml). You can put your config.toml and private
|
||||
@ -26,7 +26,7 @@ RUN mkdir -p $DATA_ROOT && \
|
||||
RUN apk add --no-cache bash curl jq
|
||||
|
||||
RUN apk add --no-cache openssl && \
|
||||
wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
|
||||
wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
|
||||
echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
|
||||
unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
|
||||
apk del openssl && \
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM alpine:3.6
|
||||
FROM alpine:3.7
|
||||
|
||||
ENV DATA_ROOT /tendermint
|
||||
ENV TMHOME $DATA_ROOT
|
||||
|
@ -1,6 +1,7 @@
|
||||
# Supported tags and respective `Dockerfile` links
|
||||
|
||||
- `0.15.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
|
||||
- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
|
||||
- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
|
||||
- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
|
||||
- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
|
||||
- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)
|
||||
|
Gopkg.lock (generated): 46 changes

@@ -105,7 +105,11 @@
    "json/scanner",
    "json/token"
  ]
<<<<<<< HEAD
  revision = "f40e974e75af4e271d97ce0fc917af5898ae7bda"
=======
  revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
>>>>>>> develop

[[projects]]
  name = "github.com/inconshreveable/mousetrap"
@@ -167,8 +171,8 @@
    ".",
    "mem"
  ]
  revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c"
  version = "v1.0.2"
  revision = "63644898a8da0bc22138abf860edaf5277b6102e"
  version = "v1.1.0"

[[projects]]
  name = "github.com/spf13/cast"
@@ -179,8 +183,8 @@
[[projects]]
  name = "github.com/spf13/cobra"
  packages = ["."]
  revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
  version = "v0.0.1"
  revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
  version = "v0.0.2"

[[projects]]
  branch = "master"
@@ -226,7 +230,11 @@
    "leveldb/table",
    "leveldb/util"
  ]
<<<<<<< HEAD
  revision = "169b1b37be738edb2813dab48c97a549bcf99bb5"
=======
  revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"
>>>>>>> develop

[[projects]]
  name = "github.com/tendermint/abci"
@@ -238,8 +246,13 @@
    "server",
    "types"
  ]
<<<<<<< HEAD
  revision = "c62aed95f2ce399ec815b0cafe478af002cdc4e6"
  version = "v0.10.3-dev"
=======
  revision = "46686763ba8ea595ede16530ed4a40fb38f49f94"
  version = "v0.10.2"
>>>>>>> develop

[[projects]]
  branch = "master"
@@ -301,7 +314,11 @@
    "ripemd160",
    "salsa20/salsa"
  ]
<<<<<<< HEAD
  revision = "88942b9c40a4c9d203b82b3731787b672d6e809b"
=======
  revision = "b2aa35443fbc700ab74c586ae79b81c171851023"
>>>>>>> develop

[[projects]]
  branch = "master"
@@ -315,13 +332,21 @@
    "lex/httplex",
    "trace"
  ]
<<<<<<< HEAD
  revision = "6078986fec03a1dcc236c34816c71b0e05018fda"
=======
  revision = "b3c676e531a6dc479fa1b35ac961c13f5e2b4d2e"
>>>>>>> develop

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
<<<<<<< HEAD
  revision = "91ee8cde435411ca3f1cd365e8f20131aed4d0a1"
=======
  revision = "1d206c9fa8975fb4cf00df1dc8bf3283dc24ba0e"
>>>>>>> develop

[[projects]]
  name = "golang.org/x/text"
@@ -348,7 +373,11 @@
  branch = "master"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
<<<<<<< HEAD
  revision = "ab0870e398d5dd054b868c0db1481ab029b9a9f2"
=======
  revision = "35de2414665fc36f56b72d982c5af480d86de5ab"
>>>>>>> develop

[[projects]]
  name = "google.golang.org/grpc"
@@ -377,12 +406,21 @@
[[projects]]
  name = "gopkg.in/yaml.v2"
  packages = ["."]
<<<<<<< HEAD
  revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
  version = "v2.1.1"
=======
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"
>>>>>>> develop

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
<<<<<<< HEAD
  inputs-digest = "ed1149ed5293b7b28b7505627648c6a1152aaff0ed06a3849995b29751ae00f3"
=======
  inputs-digest = "b7c02a311569ec5fe2197614444fb231ea60f3e65a11a20e318421f1752054d7"
>>>>>>> develop
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml: 37 changes

@@ -35,23 +35,23 @@

[[constraint]]
  name = "github.com/go-kit/kit"
  version = "0.6.0"
  version = "~0.6.0"

[[constraint]]
  name = "github.com/gogo/protobuf"
  version = "1.0.0"
  version = "~1.0.0"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "1.0.0"
  version = "~1.0.0"

[[constraint]]
  name = "github.com/gorilla/websocket"
  version = "1.2.0"
  version = "~1.2.0"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "0.8.0"
  version = "~0.8.0"

[[constraint]]
  name = "github.com/rcrowley/go-metrics"
@@ -59,18 +59,19 @@

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "0.0.1"
  version = "~0.0.1"

[[constraint]]
  name = "github.com/spf13/viper"
  version = "1.0.0"
  version = "~1.0.0"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "1.2.1"
  version = "~1.2.1"

[[constraint]]
  name = "github.com/tendermint/abci"
<<<<<<< HEAD
  version = "0.10.3-dev"

[[constraint]]
@@ -80,14 +81,32 @@
[[constraint]]
  name = "github.com/tendermint/go-amino"
  version = "0.9.6"
=======
  version = "~0.10.2"

[[constraint]]
  name = "github.com/tendermint/go-crypto"
  version = "~0.5.0"

[[constraint]]
  name = "github.com/tendermint/go-wire"
  source = "github.com/tendermint/go-amino"
  version = "~0.7.3"
>>>>>>> develop

[[override]]
# [[constraint]]
  name = "github.com/tendermint/tmlibs"
<<<<<<< HEAD
  version = "0.8.1"
=======
  version = "~0.8.1"
#  branch = "develop"
>>>>>>> develop

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.7.3"
  version = "~1.7.3"

[prune]
  go-tests = true
Makefile: 13 changes

@@ -14,13 +14,13 @@ check: check_tools ensure_deps
### Build

build:
	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/

build_race:
	go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint

install:
	go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint

########################################
### Distribution
@@ -119,11 +119,6 @@ test_integrations:
	make test_persistence
	make test_p2p

test_libs:
	# checkout every github.com/tendermint dir and run its tests
	# NOTE: on release-* or master branches only (set by Jenkins)
	docker run --name run_libs -t tester bash test/test_libs.sh

test_release:
	@go test -tags release $(PACKAGES)

@@ -186,4 +181,4 @@ metalinter_all:
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_libs test_integrations test_release test100 vagrant_test fmt
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt
@@ -75,9 +75,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
            store.Height()))
    }

    const cap = 1000 // must be bigger than peers count
    requestsCh := make(chan BlockRequest, cap)
    errorsCh := make(chan peerError, cap) // so we don't block in #Receive#pool.AddBlock
    const capacity = 1000 // must be bigger than peers count
    requestsCh := make(chan BlockRequest, capacity)
    errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock

    pool := NewBlockPool(
        store.Height()+1,
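The hunk above only renames `cap` to `capacity` (avoiding the `cap` builtin), but the underlying pattern is worth spelling out: the channels are buffered with a capacity larger than the peer count so sends in `Receive` do not block. A minimal sketch of that pattern, with placeholder types rather than the real `BlockRequest`/`peerError`:

```go
package main

import "fmt"

type blockRequest struct{ Height int64 }
type peerError struct{ Err error }

func main() {
	const capacity = 1000 // must be bigger than the peer count
	requestsCh := make(chan blockRequest, capacity)
	errorsCh := make(chan peerError, capacity)

	requestsCh <- blockRequest{Height: 1} // does not block while the buffer has room
	fmt.Println(len(requestsCh), cap(errorsCh))
}
```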
@@ -34,14 +34,14 @@ var (
)

func init() {
    LiteCmd.Flags().StringVar(&listenAddr, "laddr", ":8888", "Serve the proxy on the given port")
    LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address")
    LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:46657", "Connect to a Tendermint node at this address")
    LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
    LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
}

func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
    u, err := url.Parse(nodeAddr)
    u, err := url.Parse(addr)
    if err != nil {
        return "", err
    }
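The fix above makes `ensureAddrHasSchemeOrDefaultToTCP` parse its own argument instead of the package-level `nodeAddr`. As an approximation of what such a helper does (this is a hedged sketch, not the exact lite-proxy body): parse the address and prepend `tcp://` when no supported scheme is present.

```go
package main

import (
	"fmt"
	"net/url"
)

func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
	u, err := url.Parse(addr)
	if err != nil {
		return "", err
	}
	switch u.Scheme {
	case "tcp", "unix":
		// already carries a supported scheme
	default:
		// assumption for this sketch: everything else defaults to tcp://
		u, err = url.Parse("tcp://" + addr)
		if err != nil {
			return "", err
		}
	}
	return u.String(), nil
}

func main() {
	fmt.Println(ensureAddrHasSchemeOrDefaultToTCP("localhost:8888"))
}
```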
@@ -31,18 +31,19 @@ func AddNodeFlags(cmd *cobra.Command) {

    // p2p flags
    cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
    cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
    cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma delimited host:port persistent peers")
    cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes")
    cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers")
    cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
    cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange")
    cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode")
    cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs")

    // consensus flags
    cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
}

// NewRunNodeCmd returns the command that allows the CLI to start a
// node. It can be used with a custom PrivValidator and in-process ABCI application.
// NewRunNodeCmd returns the command that allows the CLI to start a node.
// It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
    cmd := &cobra.Command{
        Use: "node",
@@ -56,9 +57,8 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {

    if err := n.Start(); err != nil {
        return fmt.Errorf("Failed to start node: %v", err)
    } else {
        logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
    }
    logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

    // Trap signal, run forever.
    n.RunForever()
@@ -16,3 +16,8 @@ comment:
  require_changes: no
  require_base: no
  require_head: yes

ignore:
  - "docs"
  - "DOCKER"
  - "scripts"
config/config.go: 165 changes

@@ -137,10 +137,6 @@ type BaseConfig struct {
    DBPath string `mapstructure:"db_dir"`
}

func (c BaseConfig) ChainID() string {
    return c.chainID
}

// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
    return BaseConfig{
@@ -161,32 +157,36 @@ func DefaultBaseConfig() BaseConfig {

// TestBaseConfig returns a base configuration for testing a Tendermint node
func TestBaseConfig() BaseConfig {
    conf := DefaultBaseConfig()
    conf.chainID = "tendermint_test"
    conf.ProxyApp = "kvstore"
    conf.FastSync = false
    conf.DBBackend = "memdb"
    return conf
    cfg := DefaultBaseConfig()
    cfg.chainID = "tendermint_test"
    cfg.ProxyApp = "kvstore"
    cfg.FastSync = false
    cfg.DBBackend = "memdb"
    return cfg
}

func (cfg BaseConfig) ChainID() string {
    return cfg.chainID
}

// GenesisFile returns the full path to the genesis.json file
func (b BaseConfig) GenesisFile() string {
    return rootify(b.Genesis, b.RootDir)
func (cfg BaseConfig) GenesisFile() string {
    return rootify(cfg.Genesis, cfg.RootDir)
}

// PrivValidatorFile returns the full path to the priv_validator.json file
func (b BaseConfig) PrivValidatorFile() string {
    return rootify(b.PrivValidator, b.RootDir)
func (cfg BaseConfig) PrivValidatorFile() string {
    return rootify(cfg.PrivValidator, cfg.RootDir)
}

// NodeKeyFile returns the full path to the node_key.json file
func (b BaseConfig) NodeKeyFile() string {
    return rootify(b.NodeKey, b.RootDir)
func (cfg BaseConfig) NodeKeyFile() string {
    return rootify(cfg.NodeKey, cfg.RootDir)
}

// DBDir returns the full path to the database directory
func (b BaseConfig) DBDir() string {
    return rootify(b.DBPath, b.RootDir)
func (cfg BaseConfig) DBDir() string {
    return rootify(cfg.DBPath, cfg.RootDir)
}

// DefaultLogLevel returns a default log level of "error"
@@ -229,11 +229,11 @@ func DefaultRPCConfig() *RPCConfig {

// TestRPCConfig returns a configuration for testing the RPC server
func TestRPCConfig() *RPCConfig {
    conf := DefaultRPCConfig()
    conf.ListenAddress = "tcp://0.0.0.0:36657"
    conf.GRPCListenAddress = "tcp://0.0.0.0:36658"
    conf.Unsafe = true
    return conf
    cfg := DefaultRPCConfig()
    cfg.ListenAddress = "tcp://0.0.0.0:36657"
    cfg.GRPCListenAddress = "tcp://0.0.0.0:36658"
    cfg.Unsafe = true
    return cfg
}

//-----------------------------------------------------------------------------
@@ -250,8 +250,8 @@ type P2PConfig struct {
    // We only use these if we can’t connect to peers in the addrbook
    Seeds string `mapstructure:"seeds"`

    // Comma separated list of persistent peers to connect to
    // We always connect to these
    // Comma separated list of nodes to keep persistent connections to
    // Do not add private peers to this list if you don't want them advertised
    PersistentPeers string `mapstructure:"persistent_peers"`

    // Skip UPNP port forwarding
@@ -289,6 +289,9 @@ type P2PConfig struct {

    // Authenticated encryption
    AuthEnc bool `mapstructure:"auth_enc"`

    // Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
    PrivatePeerIDs string `mapstructure:"private_peer_ids"`
}

// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
@@ -310,16 +313,16 @@ func DefaultP2PConfig() *P2PConfig {

// TestP2PConfig returns a configuration for testing the peer-to-peer layer
func TestP2PConfig() *P2PConfig {
    conf := DefaultP2PConfig()
    conf.ListenAddress = "tcp://0.0.0.0:36656"
    conf.SkipUPNP = true
    conf.FlushThrottleTimeout = 10
    return conf
    cfg := DefaultP2PConfig()
    cfg.ListenAddress = "tcp://0.0.0.0:36656"
    cfg.SkipUPNP = true
    cfg.FlushThrottleTimeout = 10
    return cfg
}

// AddrBookFile returns the full path to the address book
func (p *P2PConfig) AddrBookFile() string {
    return rootify(p.AddrBook, p.RootDir)
func (cfg *P2PConfig) AddrBookFile() string {
    return rootify(cfg.AddrBook, cfg.RootDir)
}

//-----------------------------------------------------------------------------
@@ -348,14 +351,14 @@ func DefaultMempoolConfig() *MempoolConfig {

// TestMempoolConfig returns a configuration for testing the Tendermint mempool
func TestMempoolConfig() *MempoolConfig {
    config := DefaultMempoolConfig()
    config.CacheSize = 1000
    return config
    cfg := DefaultMempoolConfig()
    cfg.CacheSize = 1000
    return cfg
}

// WalDir returns the full path to the mempool's write-ahead log
func (m *MempoolConfig) WalDir() string {
    return rootify(m.WalPath, m.RootDir)
func (cfg *MempoolConfig) WalDir() string {
    return rootify(cfg.WalPath, cfg.RootDir)
}

//-----------------------------------------------------------------------------
@@ -394,6 +397,44 @@ type ConsensusConfig struct {
    PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
}

// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
    return &ConsensusConfig{
        WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
        WalLight: false,
        TimeoutPropose: 3000,
        TimeoutProposeDelta: 500,
        TimeoutPrevote: 1000,
        TimeoutPrevoteDelta: 500,
        TimeoutPrecommit: 1000,
        TimeoutPrecommitDelta: 500,
        TimeoutCommit: 1000,
        SkipTimeoutCommit: false,
        MaxBlockSizeTxs: 10000,
        MaxBlockSizeBytes: 1, // TODO
        CreateEmptyBlocks: true,
        CreateEmptyBlocksInterval: 0,
        PeerGossipSleepDuration: 100,
        PeerQueryMaj23SleepDuration: 2000,
    }
}

// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
    cfg := DefaultConsensusConfig()
    cfg.TimeoutPropose = 100
    cfg.TimeoutProposeDelta = 1
    cfg.TimeoutPrevote = 10
    cfg.TimeoutPrevoteDelta = 1
    cfg.TimeoutPrecommit = 10
    cfg.TimeoutPrecommitDelta = 1
    cfg.TimeoutCommit = 10
    cfg.SkipTimeoutCommit = true
    cfg.PeerGossipSleepDuration = 5
    cfg.PeerQueryMaj23SleepDuration = 250
    return cfg
}

// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool {
    return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
@@ -434,55 +475,17 @@ func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
    return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
}

// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
    return &ConsensusConfig{
        WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
        WalLight: false,
        TimeoutPropose: 3000,
        TimeoutProposeDelta: 500,
        TimeoutPrevote: 1000,
        TimeoutPrevoteDelta: 500,
        TimeoutPrecommit: 1000,
        TimeoutPrecommitDelta: 500,
        TimeoutCommit: 1000,
        SkipTimeoutCommit: false,
        MaxBlockSizeTxs: 10000,
        MaxBlockSizeBytes: 1, // TODO
        CreateEmptyBlocks: true,
        CreateEmptyBlocksInterval: 0,
        PeerGossipSleepDuration: 100,
        PeerQueryMaj23SleepDuration: 2000,
    }
}

// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
    config := DefaultConsensusConfig()
    config.TimeoutPropose = 100
    config.TimeoutProposeDelta = 1
    config.TimeoutPrevote = 10
    config.TimeoutPrevoteDelta = 1
    config.TimeoutPrecommit = 10
    config.TimeoutPrecommitDelta = 1
    config.TimeoutCommit = 10
    config.SkipTimeoutCommit = true
    config.PeerGossipSleepDuration = 5
    config.PeerQueryMaj23SleepDuration = 250
    return config
}

// WalFile returns the full path to the write-ahead log file
func (c *ConsensusConfig) WalFile() string {
    if c.walFile != "" {
        return c.walFile
func (cfg *ConsensusConfig) WalFile() string {
    if cfg.walFile != "" {
        return cfg.walFile
    }
    return rootify(c.WalPath, c.RootDir)
    return rootify(cfg.WalPath, cfg.RootDir)
}

// SetWalFile sets the path to the write-ahead log file
func (c *ConsensusConfig) SetWalFile(walFile string) {
    c.walFile = walFile
func (cfg *ConsensusConfig) SetWalFile(walFile string) {
    cfg.walFile = walFile
}

//-----------------------------------------------------------------------------
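The `GenesisFile`, `PrivValidatorFile`, `NodeKeyFile`, `DBDir` and `WalFile` accessors above all lean on a small `rootify` helper. A minimal sketch of that pattern, assuming the real helper behaves the same way (relative paths joined to the root directory, absolute paths returned untouched):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// rootify joins a relative path onto the node's root dir; absolute paths win.
func rootify(path, root string) string {
	if filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(root, path)
}

func main() {
	fmt.Println(rootify("config/genesis.json", "/home/user/.tendermint"))
	fmt.Println(rootify("/etc/tendermint/genesis.json", "/home/user/.tendermint"))
}
```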
@@ -127,6 +127,7 @@ laddr = "{{ .P2P.ListenAddress }}"
seeds = ""

# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""

# Path to address book
@@ -162,6 +163,9 @@ seed_mode = {{ .P2P.SeedMode }}
# Authenticated encryption
auth_enc = {{ .P2P.AuthEnc }}

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"

##### mempool configuration options #####
[mempool]
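Options like `persistent_peers` and `private_peer_ids` are comma-separated strings, and the changelog notes that whitespace is now trimmed from such lists. A small sketch of that idea; the helper name is hypothetical and this is not the actual config-parsing code.

```go
package main

import (
	"fmt"
	"strings"
)

// splitAndTrimEmpty splits a comma-separated option, trims whitespace around
// each element, and drops empty entries.
func splitAndTrimEmpty(s, sep string) []string {
	if s == "" {
		return nil
	}
	parts := strings.Split(s, sep)
	out := make([]string, 0, len(parts))
	for _, p := range parts {
		if p = strings.TrimSpace(p); p != "" {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	fmt.Println(splitAndTrimEmpty("id1@1.2.3.4:46656 , id2@5.6.7.8:46656,,", ","))
}
```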
@@ -102,13 +102,13 @@ func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*v

func incrementHeight(vss ...*validatorStub) {
    for _, vs := range vss {
        vs.Height += 1
        vs.Height++
    }
}

func incrementRound(vss ...*validatorStub) {
    for _, vs := range vss {
        vs.Round += 1
        vs.Round++
    }
}

@@ -152,6 +152,7 @@ func TestMempoolRmBadTx(t *testing.T) {
    txs := cs.mempool.Reap(1)
    if len(txs) == 0 {
        emptyMempoolCh <- struct{}{}
        return
    }
    time.Sleep(10 * time.Millisecond)
}
@@ -199,7 +200,7 @@ func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
        Code: code.CodeTypeBadNonce,
        Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
    }
    app.txCount += 1
    app.txCount++
    return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}

@@ -210,7 +211,7 @@ func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
        Code: code.CodeTypeBadNonce,
        Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)}
    }
    app.mempoolTxCount += 1
    app.mempoolTxCount++
    return abci.ResponseCheckTx{Code: code.CodeTypeOK}
}

@@ -224,9 +225,8 @@ func (app *CounterApplication) Commit() abci.ResponseCommit {
    app.mempoolTxCount = app.txCount
    if app.txCount == 0 {
        return abci.ResponseCommit{}
    } else {
        hash := make([]byte, 8)
        binary.BigEndian.PutUint64(hash, uint64(app.txCount))
        return abci.ResponseCommit{Data: hash}
    }
    hash := make([]byte, 8)
    binary.BigEndian.PutUint64(hash, uint64(app.txCount))
    return abci.ResponseCommit{Data: hash}
}
@@ -26,6 +26,8 @@ const (
    VoteSetBitsChannel = byte(0x23)

    maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.

    blocksToContributeToBecomeGoodPeer = 10000
)

//-----------------------------------------------------------------------------
@@ -254,7 +256,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
        ps.ApplyProposalPOLMessage(msg)
    case *BlockPartMessage:
        ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
        if numBlocks := ps.RecordBlockPart(msg); numBlocks > 10000 {
        if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 {
            conR.Switch.MarkPeerAsGood(src)
        }
        conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
@@ -276,7 +278,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
        ps.EnsureVoteBitArrays(height, valSize)
        ps.EnsureVoteBitArrays(height-1, lastCommitSize)
        ps.SetHasVote(msg.Vote)
        if blocks := ps.RecordVote(msg.Vote); blocks > 10000 {
        if blocks := ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
            conR.Switch.MarkPeerAsGood(src)
        }

@@ -372,19 +374,21 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
    }

    go func() {
        var data interface{}
        var ok bool
        for {
            select {
            case data, ok := <-stepsCh:
            case data, ok = <-stepsCh:
                if ok { // a receive from a closed channel returns the zero value immediately
                    edrs := data.(types.EventDataRoundState)
                    conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
                }
            case data, ok := <-votesCh:
            case data, ok = <-votesCh:
                if ok {
                    edv := data.(types.EventDataVote)
                    conR.broadcastHasVoteMessage(edv.Vote)
                }
            case data, ok := <-heartbeatsCh:
            case data, ok = <-heartbeatsCh:
                if ok {
                    edph := data.(types.EventDataProposalHeartbeat)
                    conR.broadcastProposalHeartbeatMessage(edph)
@@ -393,6 +397,10 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
                conR.eventBus.UnsubscribeAll(ctx, subscriber)
                return
            }
            if !ok {
                conR.eventBus.UnsubscribeAll(ctx, subscriber)
                return
            }
        }
    }()

@@ -603,11 +611,9 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
            logger.Debug("Sending block part for catchup failed")
        }
        return
    } else {
        //logger.Info("No parts to send in catch-up, sleeping")
        time.Sleep(conR.conS.config.PeerGossipSleep())
        return
    }
    //logger.Info("No parts to send in catch-up, sleeping")
    time.Sleep(conR.conS.config.PeerGossipSleep())
}

func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -853,6 +859,10 @@ type peerStateStats struct {
    blockParts int
}

func (pss peerStateStats) String() string {
    return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}", pss.votes, pss.blockParts)
}

// NewPeerState returns a new PeerState for the given Peer
func NewPeerState(peer p2p.Peer) *PeerState {
    return &PeerState{
@@ -1083,27 +1093,46 @@ func (ps *PeerState) RecordVote(vote *types.Vote) int {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    if ps.stats.lastVoteHeight == vote.Height {
    if ps.stats.lastVoteHeight >= vote.Height {
        return ps.stats.votes
    }
    ps.stats.lastVoteHeight = vote.Height
    ps.stats.votes += 1
    ps.stats.votes++
    return ps.stats.votes
}

// RecordVote updates internal statistics for this peer by recording the block part.
// It returns the total number of block parts (1 per block). This essentially means
// the number of blocks for which peer has been sending us block parts.
// VotesSent returns the number of blocks for which peer has been sending us
// votes.
func (ps *PeerState) VotesSent() int {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    return ps.stats.votes
}

// RecordBlockPart updates internal statistics for this peer by recording the
// block part. It returns the total number of block parts (1 per block). This
// essentially means the number of blocks for which peer has been sending us
// block parts.
func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    if ps.stats.lastBlockPartHeight == bp.Height {
    if ps.stats.lastBlockPartHeight >= bp.Height {
        return ps.stats.blockParts
    }

    ps.stats.lastBlockPartHeight = bp.Height
    ps.stats.blockParts += 1
    ps.stats.blockParts++
    return ps.stats.blockParts
}

// BlockPartsSent returns the number of blocks for which peer has been sending
// us block parts.
func (ps *PeerState) BlockPartsSent() int {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()

    return ps.stats.blockParts
}

@@ -1253,11 +1282,13 @@ func (ps *PeerState) StringIndented(indent string) string {
    ps.mtx.Lock()
    defer ps.mtx.Unlock()
    return fmt.Sprintf(`PeerState{
%s  Key %v
%s  PRS %v
%s  Key   %v
%s  PRS   %v
%s  Stats %v
%s}`,
        indent, ps.Peer.ID(),
        indent, ps.PeerRoundState.StringIndented(indent+"  "),
        indent, ps.stats,
        indent)
}
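The reactor change above replaces a one-shot `> 10000` check with a modulo test, so a peer is marked good every time its per-height vote or block-part counter crosses a multiple of `blocksToContributeToBecomeGoodPeer`. A self-contained sketch of that counting pattern; the types here are stand-ins, not the real `PeerState` or `Switch`.

```go
package main

import "fmt"

const blocksToContributeToBecomeGoodPeer = 10000

type peerStats struct {
	lastVoteHeight int64
	votes          int
}

// recordVote counts at most one vote per height, mirroring RecordVote above.
func (s *peerStats) recordVote(height int64) int {
	if s.lastVoteHeight >= height {
		return s.votes
	}
	s.lastVoteHeight = height
	s.votes++
	return s.votes
}

func main() {
	s := &peerStats{}
	for h := int64(1); h <= 20001; h++ {
		if n := s.recordVote(h); n%blocksToContributeToBecomeGoodPeer == 0 {
			fmt.Println("mark peer as good at", n, "votes")
		}
	}
}
```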
@@ -11,10 +11,13 @@ import (
    "time"

    "github.com/tendermint/abci/example/kvstore"
    wire "github.com/tendermint/tendermint/wire"
    cmn "github.com/tendermint/tmlibs/common"
    "github.com/tendermint/tmlibs/log"

    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/p2p"
    p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
    "github.com/tendermint/tendermint/types"

    "github.com/stretchr/testify/assert"
@@ -121,6 +124,112 @@ func TestReactorProposalHeartbeats(t *testing.T) {
    }, css)
}

// Test we record block parts from other peers
func TestReactorRecordsBlockParts(t *testing.T) {
    // create dummy peer
    peer := p2pdummy.NewPeer()
    ps := NewPeerState(peer).SetLogger(log.TestingLogger())
    peer.Set(types.PeerStateKey, ps)

    // create reactor
    css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore)
    reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
    reactor.SetEventBus(css[0].eventBus)
    reactor.SetLogger(log.TestingLogger())
    sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
    reactor.SetSwitch(sw)
    err := reactor.Start()
    require.NoError(t, err)
    defer reactor.Stop()

    // 1) new block part
    parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
    msg := &BlockPartMessage{
        Height: 2,
        Round: 0,
        Part: parts.GetPart(0),
    }
    bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{msg})
    require.NoError(t, err)

    reactor.Receive(DataChannel, peer, bz)
    assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")

    // 2) block part with the same height, but different round
    msg.Round = 1

    bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg})
    require.NoError(t, err)

    reactor.Receive(DataChannel, peer, bz)
    assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")

    // 3) block part from earlier height
    msg.Height = 1
    msg.Round = 0

    bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg})
    require.NoError(t, err)

    reactor.Receive(DataChannel, peer, bz)
    assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
}

// Test we record votes from other peers
func TestReactorRecordsVotes(t *testing.T) {
    // create dummy peer
    peer := p2pdummy.NewPeer()
    ps := NewPeerState(peer).SetLogger(log.TestingLogger())
    peer.Set(types.PeerStateKey, ps)

    // create reactor
    css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
    reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
    reactor.SetEventBus(css[0].eventBus)
    reactor.SetLogger(log.TestingLogger())
    sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
    reactor.SetSwitch(sw)
    err := reactor.Start()
    require.NoError(t, err)
    defer reactor.Stop()
    _, val := css[0].state.Validators.GetByIndex(0)

    // 1) new vote
    vote := &types.Vote{
        ValidatorIndex: 0,
        ValidatorAddress: val.Address,
        Height: 2,
        Round: 0,
        Timestamp: time.Now().UTC(),
        Type: types.VoteTypePrevote,
        BlockID: types.BlockID{},
    }
    bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}})
    require.NoError(t, err)

    reactor.Receive(VoteChannel, peer, bz)
    assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1")

    // 2) vote with the same height, but different round
    vote.Round = 1

    bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}})
    require.NoError(t, err)

    reactor.Receive(VoteChannel, peer, bz)
    assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")

    // 3) vote from earlier height
    vote.Height = 1
    vote.Round = 0

    bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}})
    require.NoError(t, err)

    reactor.Receive(VoteChannel, peer, bz)
    assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
}

//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set

@@ -332,7 +441,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
    // but they should be in order.
    for _, tx := range newBlock.Data.Txs {
        assert.EqualValues(t, txs[ntxs], tx)
        ntxs += 1
        ntxs++
    }

    if ntxs == len(txs) {
@@ -112,7 +112,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
        }
    }
    if found {
        return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight)
        return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
    }

    // Search for last height marker
@@ -125,7 +125,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
        return err
    }
    if !found {
        return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1)
        return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1)
    }
    defer gr.Close() // nolint: errcheck

@@ -352,7 +352,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
    var err error
    finalBlock := storeBlockHeight
    if mutateState {
        finalBlock -= 1
        finalBlock--
    }
    for i := appBlockHeight + 1; i <= finalBlock; i++ {
        h.logger.Info("Applying block", "height", i)
@@ -362,7 +362,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
            return nil, err
        }

        h.nBlocks += 1
        h.nBlocks++
    }

    if mutateState {
@@ -390,7 +390,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
        return sm.State{}, err
    }

    h.nBlocks += 1
    h.nBlocks++

    return state, nil
}
@@ -429,7 +429,7 @@ type mockProxyApp struct {

func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
    r := mock.abciResponses.DeliverTx[mock.txCount]
    mock.txCount += 1
    mock.txCount++
    return *r
}
@@ -87,9 +87,9 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
        }

        if nextN > 0 {
            nextN -= 1
            nextN--
        }
        pb.count += 1
        pb.count++
    }
    return nil
}
@@ -153,7 +153,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
        if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
            return err
        }
        pb.count += 1
        pb.count++
    }
    return nil
}
@@ -197,13 +197,12 @@ func (pb *playback) replayConsoleLoop() int {

            if len(tokens) == 1 {
                return 0
            }
            i, err := strconv.Atoi(tokens[1])
            if err != nil {
                fmt.Println("next takes an integer argument")
            } else {
                i, err := strconv.Atoi(tokens[1])
                if err != nil {
                    fmt.Println("next takes an integer argument")
                } else {
                    return i
                }
                return i
            }

        case "back":
@@ -299,7 +298,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
    // Create proxyAppConn connection (consensus, mempool, query)
    clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
    proxyApp := proxy.NewAppConns(clientCreator,
        NewHandshaker(stateDB, state, blockStore, gdoc.AppState))
        NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
    err = proxyApp.Start()
    if err != nil {
        cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
@@ -382,9 +382,9 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {

    expectedBlocksToSync := NUM_BLOCKS - nBlocks
    if nBlocks == NUM_BLOCKS && mode > 0 {
        expectedBlocksToSync += 1
        expectedBlocksToSync++
    } else if nBlocks > 0 && mode == 1 {
        expectedBlocksToSync += 1
        expectedBlocksToSync++
    }

    if handshaker.NBlocks() != expectedBlocksToSync {
@@ -533,7 +533,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
            }
            blocks = append(blocks, block)
            commits = append(commits, thisBlockCommit)
            height += 1
            height++
        }
    case *types.PartSetHeader:
        thisBlockParts = types.NewPartSetFromHeader(*p)
@@ -492,7 +492,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
func (cs *ConsensusState) newStep() {
    rs := cs.RoundStateEvent()
    cs.wal.Save(rs)
    cs.nSteps += 1
    cs.nSteps++
    // newStep is called by updateToStep in NewConsensusState before the eventBus is set!
    if cs.eventBus != nil {
        cs.eventBus.PublishEventNewRoundStep(rs)
@@ -718,11 +718,7 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
    counter := 0
    addr := cs.privValidator.GetAddress()
    valIndex, v := cs.Validators.GetByAddress(addr)
    if v == nil {
        // not a validator
        valIndex = -1
    }
    valIndex, _ := cs.Validators.GetByAddress(addr)
    chainID := cs.state.ChainID
    for {
        rs := cs.GetRoundState()
@@ -739,7 +735,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
        }
        cs.privValidator.SignHeartbeat(chainID, heartbeat)
        cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
        counter += 1
        counter++
        time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
    }
}
@@ -850,10 +846,10 @@ func (cs *ConsensusState) isProposalComplete() bool {
    // make sure we have the prevotes from it too
    if cs.Proposal.POLRound < 0 {
        return true
    } else {
        // if this is false the proposer is lying or we haven't received the POL yet
        return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
    }
    // if this is false the proposer is lying or we haven't received the POL yet
    return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()

}

// Create the next block to propose and return it.
@@ -1357,111 +1353,115 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
            return added, ErrVoteHeightMismatch
        }
        added, err = cs.LastCommit.AddVote(vote)
        if added {
            cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
            cs.eventBus.PublishEventVote(types.EventDataVote{vote})
        if !added {
            return added, err
        }

            // if we can skip timeoutCommit and have all the votes now,
            if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
                // go straight to new round (skip timeout commit)
                // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
                cs.enterNewRound(cs.Height, 0)
            }
        cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
        cs.eventBus.PublishEventVote(types.EventDataVote{vote})

        // if we can skip timeoutCommit and have all the votes now,
        if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
            // go straight to new round (skip timeout commit)
            // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
            cs.enterNewRound(cs.Height, 0)
        }

        return
    }

    // A prevote/precommit for this height?
    if vote.Height == cs.Height {
        height := cs.Height
        added, err = cs.Votes.AddVote(vote, peerID)
        if added {
            cs.eventBus.PublishEventVote(types.EventDataVote{vote})
    // Height mismatch is ignored.
    // Not necessarily a bad peer, but not favourable behaviour.
    if vote.Height != cs.Height {
        err = ErrVoteHeightMismatch
        cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
        return
    }

            switch vote.Type {
            case types.VoteTypePrevote:
                prevotes := cs.Votes.Prevotes(vote.Round)
                cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
                blockID, ok := prevotes.TwoThirdsMajority()
                // First, unlock if prevotes is a valid POL.
                // >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
                // NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
                // we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
                // there.
                if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
                    if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
                        cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
                        cs.LockedRound = 0
                        cs.LockedBlock = nil
                        cs.LockedBlockParts = nil
                        cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
                    }
                }
                // Update ValidBlock
                if ok && !blockID.IsZero() && !cs.ValidBlock.HashesTo(blockID.Hash) && vote.Round > cs.ValidRound {
                    // update valid value
                    if cs.ProposalBlock.HashesTo(blockID.Hash) {
                        cs.ValidRound = vote.Round
                        cs.ValidBlock = cs.ProposalBlock
                        cs.ValidBlockParts = cs.ProposalBlockParts
                    }
                    //TODO: We might want to update ValidBlock also in case we don't have that block yet,
                    // and obtain the required block using gossiping
                }

                if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
                    // Round-skip over to PrevoteWait or goto Precommit.
                    cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
                    if prevotes.HasTwoThirdsMajority() {
                        cs.enterPrecommit(height, vote.Round)
                    } else {
                        cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
                        cs.enterPrevoteWait(height, vote.Round)
                    }
                } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
                    // If the proposal is now complete, enter prevote of cs.Round.
                    if cs.isProposalComplete() {
                        cs.enterPrevote(height, cs.Round)
                    }
                }
            case types.VoteTypePrecommit:
                precommits := cs.Votes.Precommits(vote.Round)
                cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
                blockID, ok := precommits.TwoThirdsMajority()
                if ok {
                    if len(blockID.Hash) == 0 {
                        cs.enterNewRound(height, vote.Round+1)
                    } else {
                        cs.enterNewRound(height, vote.Round)
                        cs.enterPrecommit(height, vote.Round)
                        cs.enterCommit(height, vote.Round)

                        if cs.config.SkipTimeoutCommit && precommits.HasAll() {
                            // if we have all the votes now,
                            // go straight to new round (skip timeout commit)
                            // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
                            cs.enterNewRound(cs.Height, 0)
                        }

                    }
                } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
                    cs.enterNewRound(height, vote.Round)
                    cs.enterPrecommit(height, vote.Round)
                    cs.enterPrecommitWait(height, vote.Round)
                }
            default:
                cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
            }
        }
    height := cs.Height
    added, err = cs.Votes.AddVote(vote, peerID)
    if !added {
        // Either duplicate, or error upon cs.Votes.AddByIndex()
        return
    } else {
        err = ErrVoteHeightMismatch
    }

    // Height mismatch, bad peer?
    cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
    cs.eventBus.PublishEventVote(types.EventDataVote{vote})

    switch vote.Type {
    case types.VoteTypePrevote:
        prevotes := cs.Votes.Prevotes(vote.Round)
        cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
        blockID, ok := prevotes.TwoThirdsMajority()
        // First, unlock if prevotes is a valid POL.
        // >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
        // NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
        // we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
        // there.
        if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
            if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
                cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
                cs.LockedRound = 0
                cs.LockedBlock = nil
                cs.LockedBlockParts = nil
                cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
            }
        }
        // Update ValidBlock
        if ok && !blockID.IsZero() && !cs.ValidBlock.HashesTo(blockID.Hash) && vote.Round > cs.ValidRound {
            // update valid value
            if cs.ProposalBlock.HashesTo(blockID.Hash) {
                cs.ValidRound = vote.Round
                cs.ValidBlock = cs.ProposalBlock
                cs.ValidBlockParts = cs.ProposalBlockParts
            }
            //TODO: We might want to update ValidBlock also in case we don't have that block yet,
            // and obtain the required block using gossiping
        }

        if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
            // Round-skip over to PrevoteWait or goto Precommit.
            cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
            if prevotes.HasTwoThirdsMajority() {
                cs.enterPrecommit(height, vote.Round)
            } else {
                cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
                cs.enterPrevoteWait(height, vote.Round)
            }
        } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
            // If the proposal is now complete, enter prevote of cs.Round.
            if cs.isProposalComplete() {
                cs.enterPrevote(height, cs.Round)
            }
        }
    case types.VoteTypePrecommit:
        precommits := cs.Votes.Precommits(vote.Round)
        cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
        blockID, ok := precommits.TwoThirdsMajority()
        if ok {
            if len(blockID.Hash) == 0 {
                cs.enterNewRound(height, vote.Round+1)
            } else {
                cs.enterNewRound(height, vote.Round)
                cs.enterPrecommit(height, vote.Round)
                cs.enterCommit(height, vote.Round)

                if cs.config.SkipTimeoutCommit && precommits.HasAll() {
                    // if we have all the votes now,
                    // go straight to new round (skip timeout commit)
                    // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
                    cs.enterNewRound(cs.Height, 0)
                }

            }
        } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
            cs.enterNewRound(height, vote.Round)
            cs.enterPrecommit(height, vote.Round)
            cs.enterPrecommitWait(height, vote.Round)
        }
    default:
        panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
    }

    return
}

@@ -1492,12 +1492,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
        cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
        cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
        return vote
    } else {
        //if !cs.replayMode {
        cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
        //}
        return nil
    }
    //if !cs.replayMode {
    cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
    //}
    return nil
}

//---------------------------------------------------------
@ -53,7 +53,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
return nil, errors.Wrap(err, "failed to make genesis state")
|
||||
}
|
||||
blockStore := bc.NewBlockStore(blockStoreDB)
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState)
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
||||
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
|
@ -196,9 +196,8 @@ urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefuls
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')

urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking-and-monitoring.rst')
# the readme for below is included in tm-bench
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst')

#### abci spec #################################

@ -12,7 +12,7 @@ and want to get started right away, continue. Otherwise, [review the documentati
On a fresh Ubuntu 16.04 machine this can be done with [this script](https://git.io/vNLfY), like so:

```
curl -L https://git.io/vNLfY | bash
curl -L https://git.io/vxWlX | bash
source ~/.profile
```

@ -4,8 +4,8 @@
# and has only been tested on Digital Ocean

# get and unpack golang
curl -O https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
tar -xvf go1.9.2.linux-amd64.tar.gz
curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
tar -xvf go1.10.linux-amd64.tar.gz

apt install make

@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO

## build
git checkout v0.15.0
git checkout v0.17.0
make get_tools
make get_vendor_deps
make install
make install
@ -44,7 +44,8 @@ Tendermint Tools
tools/docker.rst
tools/mintnet-kubernetes.rst
tools/terraform-digitalocean.rst
tools/benchmarking-and-monitoring.rst
tools/benchmarking.rst
tools/monitoring.rst

Tendermint 102
--------------
@ -329,11 +329,11 @@ collateral on all other forks. Clients should verify the signatures on
the reorg-proposal, verify any evidence, and make a judgement or prompt
the end-user for a decision. For example, a phone wallet app may prompt
the user with a security warning, while a refrigerator may accept any
reorg-proposal signed by +½ of the original validators.
reorg-proposal signed by +1/2 of the original validators.

No non-synchronous Byzantine fault-tolerant algorithm can come to
consensus when ⅓+ of validators are dishonest, yet a fork assumes that
⅓+ of validators have already been dishonest by double-signing or
consensus when 1/3+ of validators are dishonest, yet a fork assumes that
1/3+ of validators have already been dishonest by double-signing or
lock-changing without justification. So, signing the reorg-proposal is a
coordination problem that cannot be solved by any non-synchronous
protocol (i.e. automatically, and without making assumptions about the
@ -89,6 +89,7 @@ like the file below, however, double check by inspecting the
seeds = ""

# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = ""

# Path to address book
@ -124,6 +125,9 @@ like the file below, however, double check by inspecting the
# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""

##### mempool configuration options #####
[mempool]

@ -5,7 +5,7 @@ Time in Tendermint is defined with the Time field of the block header.

It satisfies the following properties:

- Time Monotonicity: Time is monotonically increasing, i.e., given
- Time Monotonicity: Time is monotonically increasing, i.e., given
a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`, `H1.Time < H2.Time`.
- Time Validity: Given a set of Commit votes that forms the `block.LastCommit` field, a range of
valid values for the Time field of the block header is defined only by
@ -16,7 +16,21 @@ In the context of Tendermint, time is of type int64 and denotes UNIX time in mil
corresponds to the number of milliseconds since January 1, 1970. Before defining rules that need to be enforced by the
Tendermint consensus protocol, so that the properties above hold, we introduce the following definition:

- median of a set of `Vote` messages is equal to the median of `Vote.Time` fields of the corresponding `Vote` messages
- median of a set of `Vote` messages is equal to the median of `Vote.Time` fields of the corresponding `Vote` messages,
where the value of `Vote.Time` is counted a number of times proportional to the process's voting power. As voting power
in Tendermint is not uniform (one process, one vote), a vote message is effectively an aggregate of identical votes whose
number is equal to the voting power of the process that cast the corresponding vote message.

Let's consider the following example:
- we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10)
and (p4, 10). The total voting power is 70 (`N = 3f+1`, where `N` is the total voting power, and `f` is the maximum voting
power of the faulty processes), so we assume that the faulty processes have at most 23 voting power.
Furthermore, we have the following vote messages in some LastCommit field (we ignore all fields except the Time field):
- (p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4 are faulty processes. Let's assume that the
`block.LastCommit` message contains votes of processes p2, p3 and p4. The median is then chosen as follows:
the value 98 is counted 27 times, the value 1000 is counted 10 times and the value 500 is also counted 10 times.
So the median value will be 98. No matter what set of messages with at least `2f+1` voting power we
choose, the median value will always be between the values sent by correct processes.
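To make the weighted-median rule concrete, here is a minimal, self-contained Go sketch; the `Vote` struct and the `weightedMedianTime` helper are simplified stand-ins invented for this illustration (not the actual Tendermint types), and it reproduces the value 98 from the example above:

```golang
package main

import (
	"fmt"
	"sort"
)

// Vote is a simplified stand-in for a Tendermint precommit: only the fields
// needed for the weighted-median computation are kept.
type Vote struct {
	Time        int64 // UNIX time in milliseconds
	VotingPower int64
}

// weightedMedianTime returns the median of Vote.Time where each vote is
// counted a number of times proportional to its voting power.
func weightedMedianTime(votes []Vote) int64 {
	sort.Slice(votes, func(i, j int) bool { return votes[i].Time < votes[j].Time })

	var total int64
	for _, v := range votes {
		total += v.VotingPower
	}

	// Walk the sorted votes until half of the total voting power has been
	// accumulated; that vote's time is the weighted median.
	var acc int64
	for _, v := range votes {
		acc += v.VotingPower
		if acc*2 >= total {
			return v.Time
		}
	}
	return 0 // unreachable for non-empty input
}

func main() {
	// The LastCommit from the example: votes of p2, p3 and p4.
	votes := []Vote{{Time: 98, VotingPower: 27}, {Time: 1000, VotingPower: 10}, {Time: 500, VotingPower: 10}}
	fmt.Println(weightedMedianTime(votes)) // prints 98
}
```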
We ensure Time Monotonicity and Time Validity properties by the following rules:

114 docs/specification/new-spec/light-client.md (new file)
@ -0,0 +1,114 @@
# Light client

A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
about the blockchain application. In this document we describe mechanisms that ensure that the Tendermint light client
has the same level of security as Full Node processes (without being itself a Full Node).

To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
Validating a blockchain header in Tendermint consists of verifying that the header is committed (signed) by >2/3 of the
voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the
core functions of the light client is updating the current validator set, which is then used to verify the
blockchain header, and further the corresponding Merkle proofs.

For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
Tendermint RPC:

```golang
Header(height int64) (SignedHeader, error) // returns signed header for the given height
Validators(height int64) (ResultValidators, error) // returns validator set for the given height
LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number

type SignedHeader struct {
    Header Header
    Commit Commit
    ValSetNumber int64
}

type ResultValidators struct {
    BlockHeight int64
    Validators []Validator
    // time the current validator set is initialised, i.e., time of the last validator change before header BlockHeight
    ValSetTime int64
}
```

We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is
assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers
the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time
as the validator set init time.
Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely,
given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next
validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
set), and then starting from the block `H+2`, it will be signed by the next validator set.

Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more
clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equal to
`valSetNumber+1`.


Locally, the light client manages the following state:

```golang
valSet []Validator // current validator set (last known and verified validator set)
valSetNumber int64 // sequence number of the current validator set
valSetHash []byte // hash of the current validator set
valSetTime int64 // time when the current validator set is initialised
```

The light client is initialised with the trusted validator set, for example based on the known validator set hash,
validator set sequence number and the validator set init time.
The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid,
and 2) update the validator set (when the given header is valid and it is more recent than the seen headers).

```golang
VerifyAndUpdate(signedHeader SignedHeader):
    assertThat signedHeader.valSetNumber >= valSetNumber
    if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then
        setValidatorSet(signedHeader)
        return true
    else
        updateValidatorSet(signedHeader.ValSetNumber)
        return VerifyAndUpdate(signedHeader)

isValid(signedHeader SignedHeader):
    valSetOfTheHeader = Validators(signedHeader.Header.Height)
    assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash
    assertThat signedHeader is passing basic validation
    if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
    else
        return false

setValidatorSet(signedHeader SignedHeader):
    nextValSet = Validators(signedHeader.Header.Height)
    assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash
    valSet = nextValSet.Validators
    valSetHash = signedHeader.Header.ValidatorsHash
    valSetNumber = signedHeader.ValSetNumber
    valSetTime = nextValSet.ValSetTime

votingPower(commit Commit):
    votingPower = 0
    for each precommit in commit.Precommits do:
        if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then
            votingPower += valSet[precommit.ValidatorAddress].VotingPower
    return votingPower

votingPower(validatorSet []Validator):
    for each validator in validatorSet do:
        votingPower += validator.VotingPower
    return votingPower

updateValidatorSet(valSetNumberOfTheHeader):
    while valSetNumber != valSetNumberOfTheHeader do
        signedHeader = LastHeader(valSetNumber)
        if isValid(signedHeader) then
            setValidatorSet(signedHeader)
        else return error
    return
```

Note that in the logic above we assume that the light client will always go upward with respect to header verifications,
i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older
headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent
checks fail, a light client needs to implement some recovery strategy, for example connecting to another FullNode.
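The commit-verification step of the pseudocode above (`votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader)`) can be sketched in Go as follows; the types here are deliberately simplified stand-ins for this document, not the real Tendermint `types.Commit` / `types.Validator`:

```golang
package main

import "fmt"

// Simplified stand-ins for the spec's types; the real Tendermint types carry
// more fields (signatures, block IDs, ...).
type Validator struct {
	Address     string
	VotingPower int64
}

type Precommit struct {
	ValidatorAddress string
	SignatureOK      bool // placeholder for "signature of the precommit verifies"
}

type Commit struct {
	Precommits []Precommit
}

// commitVotingPower sums the power of validators in valSet whose precommit
// carries a valid signature, mirroring votingPower(commit) above.
func commitVotingPower(c Commit, valSet map[string]Validator) int64 {
	var power int64
	for _, pc := range c.Precommits {
		if v, ok := valSet[pc.ValidatorAddress]; ok && pc.SignatureOK {
			power += v.VotingPower
		}
	}
	return power
}

// isCommitValid is the ">2/3 of the validator set" check from isValid,
// written with integer arithmetic to avoid rounding issues.
func isCommitValid(c Commit, valSet map[string]Validator) bool {
	var total int64
	for _, v := range valSet {
		total += v.VotingPower
	}
	return 3*commitVotingPower(c, valSet) > 2*total
}

func main() {
	valSet := map[string]Validator{
		"a": {"a", 10}, "b": {"b", 10}, "c": {"c", 10},
	}
	commit := Commit{Precommits: []Precommit{
		{"a", true}, {"b", true}, // 20 of 30 is not strictly more than 2/3
	}}
	fmt.Println(isCommitValid(commit, valSet)) // false
}
```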
@ -57,10 +57,17 @@ a trust metric (see below), but it's best to start with something simple.
## Select Peers to Dial

When we need more peers, we pick them randomly from the addrbook with some
configurable bias for unvetted peers. The bias should be lower when we have fewer peers,
configurable bias for unvetted peers. The bias should be lower when we have fewer peers
and can increase as we obtain more, ensuring that our first peers are more trustworthy,
but always giving us the chance to discover new good peers.

We track the last time we dialed a peer and the number of unsuccessful attempts
we've made. If too many attempts are made, we mark the peer as bad.

Connection attempts are made with exponential backoff (plus jitter). Because
the selection process happens every `ensurePeersPeriod`, we might not end up
dialing a peer for much longer than the backoff duration.
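As a rough illustration of the dialing policy above, the following Go sketch computes an exponential backoff with jitter; the function name and constants are invented for this example and are not the reactor's actual code:

```golang
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// dialBackoff returns how long to wait before the next dial attempt:
// baseBackoff doubled per failed attempt, plus up to one second of jitter.
func dialBackoff(attempts int, baseBackoff time.Duration) time.Duration {
	backoff := baseBackoff << uint(attempts) // base * 2^attempts
	jitter := time.Duration(rand.Int63n(int64(time.Second)))
	return backoff + jitter
}

func main() {
	for attempts := 0; attempts < 5; attempts++ {
		fmt.Println(dialBackoff(attempts, 5*time.Second))
	}
}
```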
## Select Peers to Exchange

When we’re asked for peers, we select them as follows:
@ -97,6 +97,7 @@ An HTTP Get request to the root RPC endpoint (e.g.
http://localhost:46657/genesis
http://localhost:46657/net_info
http://localhost:46657/num_unconfirmed_txs
http://localhost:46657/health
http://localhost:46657/status
http://localhost:46657/unconfirmed_txs
http://localhost:46657/unsafe_flush_mempool
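For reference, a minimal Go snippet that queries one of the endpoints listed above (it assumes a node is listening on the default RPC address shown in the list; error handling is kept short for brevity):

```golang
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Query the node's /status endpoint and print the raw JSON response.
	resp, err := http.Get("http://localhost:46657/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```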
@ -11,11 +11,17 @@ import (
|
||||
)
|
||||
|
||||
func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error {
|
||||
if meta == nil {
|
||||
return errors.New("expecting a non-nil BlockMeta")
|
||||
}
|
||||
// TODO: check the BlockID??
|
||||
return ValidateHeader(meta.Header, check)
|
||||
}
|
||||
|
||||
func ValidateBlock(meta *types.Block, check lite.Commit) error {
|
||||
if meta == nil {
|
||||
return errors.New("expecting a non-nil Block")
|
||||
}
|
||||
err := ValidateHeader(meta.Header, check)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -27,6 +33,9 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error {
|
||||
}
|
||||
|
||||
func ValidateHeader(head *types.Header, check lite.Commit) error {
|
||||
if head == nil {
|
||||
return errors.New("expecting a non-nil Header")
|
||||
}
|
||||
// make sure they are for the same height (obvious fail)
|
||||
if head.Height != check.Height() {
|
||||
return certerr.ErrHeightMismatch(head.Height, check.Height())
|
||||
|
218
lite/proxy/validate_test.go
Normal file
218
lite/proxy/validate_test.go
Normal file
@ -0,0 +1,218 @@
|
||||
package proxy_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/lite"
|
||||
"github.com/tendermint/tendermint/lite/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")}
|
||||
deadBeefHash = deadBeefTxs.Hash()
|
||||
testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC)
|
||||
testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC)
|
||||
)
|
||||
|
||||
var hdrHeight11 = &types.Header{
|
||||
Height: 11,
|
||||
Time: testTime1,
|
||||
ValidatorsHash: []byte("Tendermint"),
|
||||
}
|
||||
|
||||
func TestValidateBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
block *types.Block
|
||||
commit lite.Commit
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
block: nil, wantErr: "non-nil Block",
|
||||
},
|
||||
{
|
||||
block: &types.Block{}, wantErr: "nil Header",
|
||||
},
|
||||
{
|
||||
block: &types.Block{Header: new(types.Header)},
|
||||
},
|
||||
|
||||
// Start Header.Height mismatch test
|
||||
{
|
||||
block: &types.Block{Header: &types.Header{Height: 10}},
|
||||
commit: lite.Commit{Header: &types.Header{Height: 11}},
|
||||
wantErr: "don't match - 10 vs 11",
|
||||
},
|
||||
|
||||
{
|
||||
block: &types.Block{Header: &types.Header{Height: 11}},
|
||||
commit: lite.Commit{Header: &types.Header{Height: 11}},
|
||||
},
|
||||
// End Header.Height mismatch test
|
||||
|
||||
// Start Header.Hash mismatch test
|
||||
{
|
||||
block: &types.Block{Header: hdrHeight11},
|
||||
commit: lite.Commit{Header: &types.Header{Height: 11}},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
|
||||
{
|
||||
block: &types.Block{Header: hdrHeight11},
|
||||
commit: lite.Commit{Header: hdrHeight11},
|
||||
},
|
||||
// End Header.Hash mismatch test
|
||||
|
||||
// Start Header.Data hash mismatch test
|
||||
{
|
||||
block: &types.Block{
|
||||
Header: &types.Header{Height: 11},
|
||||
Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}},
|
||||
},
|
||||
commit: lite.Commit{
|
||||
Header: &types.Header{Height: 11},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}},
|
||||
},
|
||||
wantErr: "Data hash doesn't match header",
|
||||
},
|
||||
{
|
||||
block: &types.Block{
|
||||
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
|
||||
Data: &types.Data{Txs: deadBeefTxs},
|
||||
},
|
||||
commit: lite.Commit{
|
||||
Header: &types.Header{Height: 11},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
|
||||
},
|
||||
},
|
||||
// End Header.Data hash mismatch test
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
err := proxy.ValidateBlock(tt.block, tt.commit)
|
||||
if tt.wantErr != "" {
|
||||
if err == nil {
|
||||
assert.FailNowf(t, "Unexpectedly passed", "#%d", i)
|
||||
} else {
|
||||
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Nil(t, err, "#%d: expecting a nil error", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateBlockMeta(t *testing.T) {
|
||||
tests := []struct {
|
||||
meta *types.BlockMeta
|
||||
commit lite.Commit
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
meta: nil, wantErr: "non-nil BlockMeta",
|
||||
},
|
||||
{
|
||||
meta: &types.BlockMeta{}, wantErr: "non-nil Header",
|
||||
},
|
||||
{
|
||||
meta: &types.BlockMeta{Header: new(types.Header)},
|
||||
},
|
||||
|
||||
// Start Header.Height mismatch test
|
||||
{
|
||||
meta: &types.BlockMeta{Header: &types.Header{Height: 10}},
|
||||
commit: lite.Commit{Header: &types.Header{Height: 11}},
|
||||
wantErr: "don't match - 10 vs 11",
|
||||
},
|
||||
|
||||
{
|
||||
meta: &types.BlockMeta{Header: &types.Header{Height: 11}},
|
||||
commit: lite.Commit{Header: &types.Header{Height: 11}},
|
||||
},
|
||||
// End Header.Height mismatch test
|
||||
|
||||
// Start Headers don't match test
|
||||
{
|
||||
meta: &types.BlockMeta{Header: hdrHeight11},
|
||||
commit: lite.Commit{Header: &types.Header{Height: 11}},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
|
||||
{
|
||||
meta: &types.BlockMeta{Header: hdrHeight11},
|
||||
commit: lite.Commit{Header: hdrHeight11},
|
||||
},
|
||||
|
||||
{
|
||||
meta: &types.BlockMeta{
|
||||
Header: &types.Header{
|
||||
Height: 11,
|
||||
ValidatorsHash: []byte("lite-test"),
|
||||
// TODO: should be able to use empty time after Amino upgrade
|
||||
Time: testTime1,
|
||||
},
|
||||
},
|
||||
commit: lite.Commit{
|
||||
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
|
||||
},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
|
||||
{
|
||||
meta: &types.BlockMeta{
|
||||
Header: &types.Header{
|
||||
Height: 11, DataHash: deadBeefHash,
|
||||
ValidatorsHash: []byte("Tendermint"),
|
||||
Time: testTime1,
|
||||
},
|
||||
},
|
||||
commit: lite.Commit{
|
||||
Header: &types.Header{
|
||||
Height: 11, DataHash: deadBeefHash,
|
||||
ValidatorsHash: []byte("Tendermint"),
|
||||
Time: testTime2,
|
||||
},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
|
||||
},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
|
||||
{
|
||||
meta: &types.BlockMeta{
|
||||
Header: &types.Header{
|
||||
Height: 11, DataHash: deadBeefHash,
|
||||
ValidatorsHash: []byte("Tendermint"),
|
||||
Time: testTime2,
|
||||
},
|
||||
},
|
||||
commit: lite.Commit{
|
||||
Header: &types.Header{
|
||||
Height: 11, DataHash: deadBeefHash,
|
||||
ValidatorsHash: []byte("Tendermint-x"),
|
||||
Time: testTime2,
|
||||
},
|
||||
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
|
||||
},
|
||||
wantErr: "Headers don't match",
|
||||
},
|
||||
// End Headers don't match test
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
err := proxy.ValidateBlockMeta(tt.meta, tt.commit)
|
||||
if tt.wantErr != "" {
|
||||
if err == nil {
|
||||
assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr)
|
||||
} else {
|
||||
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Nil(t, err, "#%d: expecting a nil error", i)
|
||||
}
|
||||
}
|
40
node/node.go
40
node/node.go
@ -7,7 +7,6 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
abci "github.com/tendermint/abci/types"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
@ -163,7 +162,7 @@ func NewNode(config *cfg.Config,
|
||||
// and sync tendermint and the app by performing a handshake
|
||||
// and replaying any necessary blocks
|
||||
consensusLogger := logger.With("module", "consensus")
|
||||
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState)
|
||||
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
||||
handshaker.SetLogger(consensusLogger)
|
||||
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
@ -278,12 +277,11 @@ func NewNode(config *cfg.Config,
|
||||
trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig())
|
||||
trustMetricStore.SetLogger(p2pLogger)
|
||||
|
||||
var seeds []string
|
||||
if config.P2P.Seeds != "" {
|
||||
seeds = strings.Split(config.P2P.Seeds, ",")
|
||||
}
|
||||
pexReactor := pex.NewPEXReactor(addrBook,
|
||||
&pex.PEXReactorConfig{Seeds: seeds, SeedMode: config.P2P.SeedMode})
|
||||
&pex.PEXReactorConfig{
|
||||
Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "),
|
||||
SeedMode: config.P2P.SeedMode,
|
||||
PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")})
|
||||
pexReactor.SetLogger(p2pLogger)
|
||||
sw.AddReactor("PEX", pexReactor)
|
||||
}
|
||||
@ -333,7 +331,7 @@ func NewNode(config *cfg.Config,
|
||||
return nil, err
|
||||
}
|
||||
if config.TxIndex.IndexTags != "" {
|
||||
txIndexer = kv.NewTxIndex(store, kv.IndexTags(strings.Split(config.TxIndex.IndexTags, ",")))
|
||||
txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " ")))
|
||||
} else if config.TxIndex.IndexAllTags {
|
||||
txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
|
||||
} else {
|
||||
@ -408,9 +406,14 @@ func (n *Node) OnStart() error {
|
||||
}
|
||||
n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile())
|
||||
|
||||
// Start the switch
|
||||
n.sw.SetNodeInfo(n.makeNodeInfo(nodeKey.PubKey()))
|
||||
nodeInfo := n.makeNodeInfo(nodeKey.PubKey())
|
||||
n.sw.SetNodeInfo(nodeInfo)
|
||||
n.sw.SetNodeKey(nodeKey)
|
||||
|
||||
// Add ourselves to addrbook to prevent dialing ourselves
|
||||
n.addrBook.AddOurAddress(nodeInfo.NetAddress())
|
||||
|
||||
// Start the switch
|
||||
err = n.sw.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -418,7 +421,7 @@ func (n *Node) OnStart() error {
|
||||
|
||||
// Always connect to persistent peers
|
||||
if n.config.P2P.PersistentPeers != "" {
|
||||
err = n.sw.DialPeersAsync(n.addrBook, strings.Split(n.config.P2P.PersistentPeers, ","), true)
|
||||
err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -489,7 +492,7 @@ func (n *Node) ConfigureRPC() {
|
||||
|
||||
func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
n.ConfigureRPC()
|
||||
listenAddrs := strings.Split(n.config.RPC.ListenAddress, ",")
|
||||
listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
|
||||
coreCodec := amino.NewCodec()
|
||||
ctypes.RegisterAmino(coreCodec)
|
||||
|
||||
@ -639,14 +642,13 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
|
||||
bytes := db.Get(genesisDocKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil, errors.New("Genesis doc not found")
|
||||
} else {
|
||||
var genDoc *types.GenesisDoc
|
||||
err := json.Unmarshal(bytes, &genDoc)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
|
||||
}
|
||||
return genDoc, nil
|
||||
}
|
||||
var genDoc *types.GenesisDoc
|
||||
err := json.Unmarshal(bytes, &genDoc)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
|
||||
}
|
||||
return genDoc, nil
|
||||
}
|
||||
|
||||
// panics if failed to marshal the given genesis document
|
||||
|
@ -47,7 +47,7 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
|
||||
func (br *BaseReactor) SetSwitch(sw *Switch) {
|
||||
br.Switch = sw
|
||||
}
|
||||
func (_ *BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
|
||||
func (_ *BaseReactor) AddPeer(peer Peer) {}
|
||||
func (_ *BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
|
||||
func (_ *BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
|
||||
func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
|
||||
func (*BaseReactor) AddPeer(peer Peer) {}
|
||||
func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
|
||||
func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
|
||||
|
@ -432,9 +432,8 @@ func (c *MConnection) sendPacketMsg() bool {
|
||||
// Nothing to send?
|
||||
if leastChannel == nil {
|
||||
return true
|
||||
} else {
|
||||
// c.Logger.Info("Found a PacketMsg to send")
|
||||
}
|
||||
// c.Logger.Info("Found a msgPacket to send")
|
||||
|
||||
// Make & send a PacketMsg from this channel
|
||||
_n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
|
||||
|
@ -111,7 +111,7 @@ func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
|
||||
// CONTRACT: data smaller than dataMaxSize is read atomically.
|
||||
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
|
||||
for 0 < len(data) {
|
||||
var frame []byte = make([]byte, totalFrameSize)
|
||||
var frame = make([]byte, totalFrameSize)
|
||||
var chunk []byte
|
||||
if dataMaxSize < len(data) {
|
||||
chunk = data[:dataMaxSize]
|
||||
@ -134,9 +134,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
|
||||
_, err := sc.conn.Write(sealedFrame)
|
||||
if err != nil {
|
||||
return n, err
|
||||
} else {
|
||||
n += len(chunk)
|
||||
}
|
||||
n += len(chunk)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -229,7 +228,6 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3
|
||||
// Otherwise:
|
||||
var _remEphPub = trs.FirstValue().([32]byte)
|
||||
return &_remEphPub, nil
|
||||
|
||||
}
|
||||
|
||||
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
|
||||
@ -342,7 +340,7 @@ func incr2Nonce(nonce *[24]byte) {
|
||||
// increment nonce big-endian by 1 with wraparound.
|
||||
func incrNonce(nonce *[24]byte) {
|
||||
for i := 23; 0 <= i; i-- {
|
||||
nonce[i] += 1
|
||||
nonce[i]++
|
||||
if nonce[i] != 0 {
|
||||
return
|
||||
}
|
||||
|
@ -73,6 +73,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
|
||||
return nil, nil, false
|
||||
},
|
||||
)
|
||||
|
||||
require.Nil(tb, trs.FirstError())
|
||||
require.True(tb, ok, "Unexpected task abortion")
|
||||
|
||||
@ -181,7 +182,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
|
||||
var readCount = 0
|
||||
for _, readChunk := range reads {
|
||||
read += readChunk
|
||||
readCount += 1
|
||||
readCount++
|
||||
if len(write) <= len(read) {
|
||||
break
|
||||
}
|
||||
|
72
p2p/dummy/peer.go
Normal file
72
p2p/dummy/peer.go
Normal file
@ -0,0 +1,72 @@
|
||||
package dummy
|
||||
|
||||
import (
|
||||
p2p "github.com/tendermint/tendermint/p2p"
|
||||
tmconn "github.com/tendermint/tendermint/p2p/conn"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
type peer struct {
|
||||
cmn.BaseService
|
||||
kv map[string]interface{}
|
||||
}
|
||||
|
||||
var _ p2p.Peer = (*peer)(nil)
|
||||
|
||||
// NewPeer creates a new dummy peer.
func NewPeer() *peer {
|
||||
p := &peer{
|
||||
kv: make(map[string]interface{}),
|
||||
}
|
||||
p.BaseService = *cmn.NewBaseService(nil, "peer", p)
|
||||
return p
|
||||
}
|
||||
|
||||
// ID always returns dummy.
|
||||
func (p *peer) ID() p2p.ID {
|
||||
return p2p.ID("dummy")
|
||||
}
|
||||
|
||||
// IsOutbound always returns false.
|
||||
func (p *peer) IsOutbound() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPersistent always returns false.
|
||||
func (p *peer) IsPersistent() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// NodeInfo always returns empty node info.
|
||||
func (p *peer) NodeInfo() p2p.NodeInfo {
|
||||
return p2p.NodeInfo{}
|
||||
}
|
||||
|
||||
// Status always returns empty connection status.
func (p *peer) Status() tmconn.ConnectionStatus {
|
||||
return tmconn.ConnectionStatus{}
|
||||
}
|
||||
|
||||
// Send does not do anything and just returns true.
|
||||
func (p *peer) Send(byte, interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// TrySend does not do anything and just returns true.
|
||||
func (p *peer) TrySend(byte, interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Set records value under key specified in the map.
|
||||
func (p *peer) Set(key string, value interface{}) {
|
||||
p.kv[key] = value
|
||||
}
|
||||
|
||||
// Get returns a value associated with the key. Nil is returned if no value
|
||||
// found.
|
||||
func (p *peer) Get(key string) interface{} {
|
||||
if value, ok := p.kv[key]; ok {
|
||||
return value
|
||||
}
|
||||
return nil
|
||||
}
|
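A short, hypothetical test sketch showing how the dummy peer above might be used (the import paths and assertions are assumptions for illustration, not part of this commit):

```golang
package dummy_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/p2p/dummy"
)

func TestDummyPeerKV(t *testing.T) {
	p := dummy.NewPeer()

	// Send always reports success.
	assert.True(t, p.Send(0x01, "msg"))

	// Set/Get behave like a simple key-value store.
	p.Set("height", 10)
	assert.Equal(t, 10, p.Get("height"))
	assert.Nil(t, p.Get("missing"))
}
```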
@ -52,9 +52,8 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
|
||||
return nil, err
|
||||
}
|
||||
return nodeKey, nil
|
||||
} else {
|
||||
return genNodeKey(filePath)
|
||||
}
|
||||
return genNodeKey(filePath)
|
||||
}
|
||||
|
||||
func loadNodeKey(filePath string) (*NodeKey, error) {
|
||||
@ -65,7 +64,7 @@ func loadNodeKey(filePath string) (*NodeKey, error) {
|
||||
nodeKey := new(NodeKey)
|
||||
err = cdc.UnmarshalJSON(jsonBytes, nodeKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error reading NodeKey from %v: %v\n", filePath, err)
|
||||
return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err)
|
||||
}
|
||||
return nodeKey, nil
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
|
||||
|
||||
// Determine internal address...
|
||||
var intAddr *NetAddress
|
||||
intAddr, err = NewNetAddressString(lAddr)
|
||||
intAddr, err = NewNetAddressStringWithOptionalID(lAddr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -48,33 +48,45 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress {
|
||||
}
|
||||
ip := tcpAddr.IP
|
||||
port := uint16(tcpAddr.Port)
|
||||
netAddr := NewNetAddressIPPort(ip, port)
|
||||
netAddr.ID = id
|
||||
return netAddr
|
||||
na := NewNetAddressIPPort(ip, port)
|
||||
na.ID = id
|
||||
return na
|
||||
}
|
||||
|
||||
// NewNetAddressString returns a new NetAddress using the provided
|
||||
// address in the form of "ID@IP:Port", where the ID is optional.
|
||||
// NewNetAddressString returns a new NetAddress using the provided address in
|
||||
// the form of "ID@IP:Port".
|
||||
// Also resolves the host if host is not an IP.
|
||||
func NewNetAddressString(addr string) (*NetAddress, error) {
|
||||
addr = removeProtocolIfDefined(addr)
|
||||
spl := strings.Split(addr, "@")
|
||||
if len(spl) < 2 {
|
||||
return nil, fmt.Errorf("Address (%s) does not contain ID", addr)
|
||||
}
|
||||
return NewNetAddressStringWithOptionalID(addr)
|
||||
}
|
||||
|
||||
// NewNetAddressStringWithOptionalID returns a new NetAddress using the
|
||||
// provided address in the form of "ID@IP:Port", where the ID is optional.
|
||||
// Also resolves the host if host is not an IP.
|
||||
func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
|
||||
addrWithoutProtocol := removeProtocolIfDefined(addr)
|
||||
|
||||
var id ID
|
||||
spl := strings.Split(addr, "@")
|
||||
spl := strings.Split(addrWithoutProtocol, "@")
|
||||
if len(spl) == 2 {
|
||||
idStr := spl[0]
|
||||
idBytes, err := hex.DecodeString(idStr)
|
||||
if err != nil {
|
||||
return nil, cmn.ErrorWrap(err, fmt.Sprintf("Address (%s) contains invalid ID", addr))
|
||||
return nil, cmn.ErrorWrap(err, fmt.Sprintf("Address (%s) contains invalid ID", addrWithoutProtocol))
|
||||
}
|
||||
if len(idBytes) != IDByteLength {
|
||||
return nil, fmt.Errorf("Address (%s) contains ID of invalid length (%d). Should be %d hex-encoded bytes",
|
||||
addr, len(idBytes), IDByteLength)
|
||||
addrWithoutProtocol, len(idBytes), IDByteLength)
|
||||
}
|
||||
id, addr = ID(idStr), spl[1]
|
||||
|
||||
id, addrWithoutProtocol = ID(idStr), spl[1]
|
||||
}
|
||||
|
||||
host, portStr, err := net.SplitHostPort(addr)
|
||||
host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -119,11 +131,10 @@ func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
|
||||
// NewNetAddressIPPort returns a new NetAddress using the provided IP
|
||||
// and port number.
|
||||
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
|
||||
na := &NetAddress{
|
||||
return &NetAddress{
|
||||
IP: ip,
|
||||
Port: port,
|
||||
}
|
||||
return na
|
||||
}
|
||||
|
||||
// Equals reports whether na and other are the same addresses,
|
||||
@ -293,7 +304,7 @@ func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
|
||||
func removeProtocolIfDefined(addr string) string {
|
||||
if strings.Contains(addr, "://") {
|
||||
return strings.Split(addr, "://")[1]
|
||||
} else {
|
||||
return addr
|
||||
}
|
||||
return addr
|
||||
|
||||
}
|
||||
|
@ -9,20 +9,18 @@ import (
|
||||
)
|
||||
|
||||
func TestNewNetAddress(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
|
||||
require.Nil(err)
|
||||
require.Nil(t, err)
|
||||
addr := NewNetAddress("", tcpAddr)
|
||||
|
||||
assert.Equal("127.0.0.1:8080", addr.String())
|
||||
assert.Equal(t, "127.0.0.1:8080", addr.String())
|
||||
|
||||
assert.NotPanics(func() {
|
||||
assert.NotPanics(t, func() {
|
||||
NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
|
||||
}, "Calling NewNetAddress with UDPAddr should not panic in testing")
|
||||
}
|
||||
|
||||
func TestNewNetAddressString(t *testing.T) {
|
||||
func TestNewNetAddressStringWithOptionalID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
addr string
|
||||
expected string
|
||||
@ -57,6 +55,28 @@ func TestNewNetAddressString(t *testing.T) {
|
||||
{" @ ", "", false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
addr, err := NewNetAddressStringWithOptionalID(tc.addr)
|
||||
if tc.correct {
|
||||
if assert.Nil(t, err, tc.addr) {
|
||||
assert.Equal(t, tc.expected, addr.String())
|
||||
}
|
||||
} else {
|
||||
assert.NotNil(t, err, tc.addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNetAddressString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
addr string
|
||||
expected string
|
||||
correct bool
|
||||
}{
|
||||
{"127.0.0.1:8080", "127.0.0.1:8080", false},
|
||||
{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
addr, err := NewNetAddressString(tc.addr)
|
||||
if tc.correct {
|
||||
@ -70,23 +90,22 @@ func TestNewNetAddressString(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewNetAddressStrings(t *testing.T) {
|
||||
addrs, errs := NewNetAddressStrings([]string{"127.0.0.1:8080", "127.0.0.2:8080"})
|
||||
assert.Len(t, errs, 0)
|
||||
addrs, errs := NewNetAddressStrings([]string{
|
||||
"127.0.0.1:8080",
|
||||
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
|
||||
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"})
|
||||
assert.Len(t, errs, 1)
|
||||
assert.Equal(t, 2, len(addrs))
|
||||
}
|
||||
|
||||
func TestNewNetAddressIPPort(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080)
|
||||
|
||||
assert.Equal("127.0.0.1:8080", addr.String())
|
||||
assert.Equal(t, "127.0.0.1:8080", addr.String())
|
||||
}
|
||||
|
||||
func TestNetAddressProperties(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
// TODO add more test cases
|
||||
tests := []struct {
|
||||
testCases := []struct {
|
||||
addr string
|
||||
valid bool
|
||||
local bool
|
||||
@ -96,21 +115,19 @@ func TestNetAddressProperties(t *testing.T) {
|
||||
{"ya.ru:80", true, false, true},
|
||||
}
|
||||
|
||||
for _, t := range tests {
|
||||
addr, err := NewNetAddressString(t.addr)
|
||||
require.Nil(err)
|
||||
for _, tc := range testCases {
|
||||
addr, err := NewNetAddressStringWithOptionalID(tc.addr)
|
||||
require.Nil(t, err)
|
||||
|
||||
assert.Equal(t.valid, addr.Valid())
|
||||
assert.Equal(t.local, addr.Local())
|
||||
assert.Equal(t.routable, addr.Routable())
|
||||
assert.Equal(t, tc.valid, addr.Valid())
|
||||
assert.Equal(t, tc.local, addr.Local())
|
||||
assert.Equal(t, tc.routable, addr.Routable())
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetAddressReachabilityTo(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
// TODO add more test cases
|
||||
tests := []struct {
|
||||
testCases := []struct {
|
||||
addr string
|
||||
other string
|
||||
reachability int
|
||||
@ -119,13 +136,13 @@ func TestNetAddressReachabilityTo(t *testing.T) {
|
||||
{"ya.ru:80", "127.0.0.1:8080", 1},
|
||||
}
|
||||
|
||||
for _, t := range tests {
|
||||
addr, err := NewNetAddressString(t.addr)
|
||||
require.Nil(err)
|
||||
for _, tc := range testCases {
|
||||
addr, err := NewNetAddressStringWithOptionalID(tc.addr)
|
||||
require.Nil(t, err)
|
||||
|
||||
other, err := NewNetAddressString(t.other)
|
||||
require.Nil(err)
|
||||
other, err := NewNetAddressStringWithOptionalID(tc.other)
|
||||
require.Nil(t, err)
|
||||
|
||||
assert.Equal(t.reachability, addr.ReachabilityTo(other))
|
||||
assert.Equal(t, tc.reachability, addr.ReachabilityTo(other))
|
||||
}
|
||||
}
|
||||
|
@ -107,6 +107,7 @@ OUTER_LOOP:
|
||||
return nil
|
||||
}
|
||||
|
||||
// ID returns node's ID.
|
||||
func (info NodeInfo) ID() ID {
|
||||
return PubKeyToID(info.PubKey)
|
||||
}
|
||||
|
@ -351,7 +351,9 @@ func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, ch
|
||||
onReceive := func(chID byte, msgBytes []byte) {
|
||||
reactor := reactorsByCh[chID]
|
||||
if reactor == nil {
|
||||
onPeerError(p, fmt.Errorf("Unknown channel %X", chID))
|
||||
// Note that its ok to panic here as it's caught in the conn._recover,
|
||||
// which does onPeerError.
|
||||
panic(cmn.Fmt("Unknown channel %X", chID))
|
||||
}
|
||||
reactor.Receive(chID, p, msgBytes)
|
||||
}
|
||||
|
@ -68,9 +68,8 @@ func (ps *PeerSet) Get(peerKey ID) Peer {
|
||||
item, ok := ps.lookup[peerKey]
|
||||
if ok {
|
||||
return item.peer
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove discards peer by its Key, if the peer was previously memoized.
|
||||
|
@ -115,7 +115,7 @@ func TestPeerSetAddDuplicate(t *testing.T) {
|
||||
errsTally := make(map[error]int)
|
||||
for i := 0; i < n; i++ {
|
||||
err := <-errsChan
|
||||
errsTally[err] += 1
|
||||
errsTally[err]++
|
||||
}
|
||||
|
||||
// Our next procedure is to ensure that only one addition
|
||||
|
@ -140,6 +140,8 @@ func (p *remotePeer) Stop() {
|
||||
}
|
||||
|
||||
func (p *remotePeer) accept(l net.Listener) {
|
||||
conns := []net.Conn{}
|
||||
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
@ -160,10 +162,15 @@ func (p *remotePeer) accept(l net.Listener) {
|
||||
if err != nil {
|
||||
golog.Fatalf("Failed to perform handshake: %+v", err)
|
||||
}
|
||||
|
||||
conns = append(conns, conn)
|
||||
|
||||
select {
|
||||
case <-p.quit:
|
||||
if err := conn.Close(); err != nil {
|
||||
golog.Fatal(err)
|
||||
for _, conn := range conns {
|
||||
if err := conn.Close(); err != nil {
|
||||
golog.Fatal(err)
|
||||
}
|
||||
}
|
||||
return
|
||||
default:
|
||||
|
@ -33,24 +33,33 @@ type AddrBook interface {
|
||||
|
||||
// Add our own addresses so we don't later add ourselves
|
||||
AddOurAddress(*p2p.NetAddress)
|
||||
// Check if it is our address
|
||||
OurAddress(*p2p.NetAddress) bool
|
||||
|
||||
// Add and remove an address
|
||||
AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error
|
||||
RemoveAddress(addr *p2p.NetAddress)
|
||||
RemoveAddress(*p2p.NetAddress)
|
||||
|
||||
// Check if the address is in the book
|
||||
HasAddress(*p2p.NetAddress) bool
|
||||
|
||||
// Do we need more peers?
|
||||
NeedMoreAddrs() bool
|
||||
|
||||
// Pick an address to dial
|
||||
PickAddress(newBias int) *p2p.NetAddress
|
||||
PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress
|
||||
|
||||
// Mark address
|
||||
MarkGood(*p2p.NetAddress)
|
||||
MarkAttempt(*p2p.NetAddress)
|
||||
MarkBad(*p2p.NetAddress)
|
||||
|
||||
IsGood(*p2p.NetAddress) bool
|
||||
|
||||
// Send a selection of addresses to peers
|
||||
GetSelection() []*p2p.NetAddress
|
||||
// Send a selection of addresses with bias
|
||||
GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress
|
||||
|
||||
// TODO: remove
|
||||
ListOfKnownAddresses() []*knownAddress
|
||||
@ -74,7 +83,7 @@ type addrBook struct {
|
||||
// accessed concurrently
|
||||
mtx sync.Mutex
|
||||
rand *rand.Rand
|
||||
ourAddrs map[string]*p2p.NetAddress
|
||||
ourAddrs map[string]struct{}
|
||||
addrLookup map[p2p.ID]*knownAddress // new & old
|
||||
bucketsOld []map[string]*knownAddress
|
||||
bucketsNew []map[string]*knownAddress
|
||||
@ -89,7 +98,7 @@ type addrBook struct {
|
||||
func NewAddrBook(filePath string, routabilityStrict bool) *addrBook {
|
||||
am := &addrBook{
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())), // TODO: seed from outside
|
||||
ourAddrs: make(map[string]*p2p.NetAddress),
|
||||
ourAddrs: make(map[string]struct{}),
|
||||
addrLookup: make(map[p2p.ID]*knownAddress),
|
||||
filePath: filePath,
|
||||
routabilityStrict: routabilityStrict,
|
||||
@ -150,7 +159,15 @@ func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
a.Logger.Info("Add our address to book", "addr", addr)
|
||||
a.ourAddrs[addr.String()] = addr
|
||||
a.ourAddrs[addr.String()] = struct{}{}
|
||||
}
|
||||
|
||||
// OurAddress returns true if it is our address.
|
||||
func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
|
||||
a.mtx.Lock()
|
||||
_, ok := a.ourAddrs[addr.String()]
|
||||
a.mtx.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// AddAddress implements AddrBook - adds the given address as received from the given source.
|
||||
@ -173,6 +190,22 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
|
||||
a.removeFromAllBuckets(ka)
|
||||
}
|
||||
|
||||
// IsGood returns true if the peer was ever marked as good and has not
// done anything wrong since then.
func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
return a.addrLookup[addr.ID].isOld()
|
||||
}
|
||||
|
||||
// HasAddress returns true if the address is in the book.
|
||||
func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
ka := a.addrLookup[addr.ID]
|
||||
return ka != nil
|
||||
}
|
||||
|
||||
// NeedMoreAddrs implements AddrBook - returns true if the book does not have enough addresses.
func (a *addrBook) NeedMoreAddrs() bool {
|
||||
return a.Size() < needAddressThreshold
|
||||
@ -180,27 +213,27 @@ func (a *addrBook) NeedMoreAddrs() bool {
|
||||
|
||||
// PickAddress implements AddrBook. It picks an address to connect to.
|
||||
// The address is picked randomly from an old or new bucket according
|
||||
// to the newBias argument, which must be between [0, 100] (or else is truncated to that range)
|
||||
// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
|
||||
// and determines how biased we are to pick an address from a new bucket.
|
||||
// PickAddress returns nil if the AddrBook is empty or if we try to pick
|
||||
// from an empty bucket.
|
||||
func (a *addrBook) PickAddress(newBias int) *p2p.NetAddress {
|
||||
func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
|
||||
if a.size() == 0 {
|
||||
return nil
|
||||
}
|
||||
if newBias > 100 {
|
||||
newBias = 100
|
||||
if biasTowardsNewAddrs > 100 {
|
||||
biasTowardsNewAddrs = 100
|
||||
}
|
||||
if newBias < 0 {
|
||||
newBias = 0
|
||||
if biasTowardsNewAddrs < 0 {
|
||||
biasTowardsNewAddrs = 0
|
||||
}
|
||||
|
||||
// Bias between new and old addresses.
|
||||
oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
|
||||
newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
|
||||
oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs))
|
||||
newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs)
|
||||
|
||||
// pick a random peer from a random bucket
|
||||
var bucket map[string]*knownAddress
|
||||
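To see roughly what the square-root bias formula in the hunk above does, here is a small illustrative Go program; the `pickOldProbability` helper is invented for this example and is not part of the address book code:

```golang
package main

import (
	"fmt"
	"math"
)

// pickOldProbability mirrors the bias computation above: the chance of
// choosing an old-bucket address given the old/new bucket sizes and a
// biasTowardsNewAddrs value in [0, 100].
func pickOldProbability(nOld, nNew, biasTowardsNewAddrs int) float64 {
	oldCorrelation := math.Sqrt(float64(nOld)) * (100.0 - float64(biasTowardsNewAddrs))
	newCorrelation := math.Sqrt(float64(nNew)) * float64(biasTowardsNewAddrs)
	return oldCorrelation / (oldCorrelation + newCorrelation)
}

func main() {
	// With 100 old addresses, 25 new ones and a bias of 30,
	// an old address is picked with probability 700/850 ~= 0.82.
	fmt.Printf("%.2f\n", pickOldProbability(100, 25, 30))
}
```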
@ -295,6 +328,100 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress {
|
||||
return allAddr[:numAddresses]
|
||||
}
|
||||
|
||||
// GetSelectionWithBias implements AddrBook.
|
||||
// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
|
||||
//
|
||||
// Each address is picked randomly from an old or new bucket according to the
|
||||
// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to
|
||||
// that range) and determines how biased we are to pick an address from a new
|
||||
// bucket.
|
||||
func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
|
||||
if a.size() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if biasTowardsNewAddrs > 100 {
|
||||
biasTowardsNewAddrs = 100
|
||||
}
|
||||
if biasTowardsNewAddrs < 0 {
|
||||
biasTowardsNewAddrs = 0
|
||||
}
|
||||
|
||||
numAddresses := cmn.MaxInt(
|
||||
cmn.MinInt(minGetSelection, a.size()),
|
||||
a.size()*getSelectionPercent/100)
|
||||
numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
|
||||
|
||||
selection := make([]*p2p.NetAddress, numAddresses)
|
||||
|
||||
oldBucketToAddrsMap := make(map[int]map[string]struct{})
|
||||
var oldIndex int
|
||||
newBucketToAddrsMap := make(map[int]map[string]struct{})
|
||||
var newIndex int
|
||||
|
||||
selectionIndex := 0
|
||||
ADDRS_LOOP:
|
||||
for selectionIndex < numAddresses {
|
||||
pickFromOldBucket := int((float64(selectionIndex)/float64(numAddresses))*100) >= biasTowardsNewAddrs
|
||||
pickFromOldBucket = (pickFromOldBucket && a.nOld > 0) || a.nNew == 0
|
||||
bucket := make(map[string]*knownAddress)
|
||||
|
||||
// loop until we pick a random non-empty bucket
|
||||
for len(bucket) == 0 {
|
||||
if pickFromOldBucket {
|
||||
oldIndex = a.rand.Intn(len(a.bucketsOld))
|
||||
bucket = a.bucketsOld[oldIndex]
|
||||
} else {
|
||||
newIndex = a.rand.Intn(len(a.bucketsNew))
|
||||
bucket = a.bucketsNew[newIndex]
|
||||
}
|
||||
}
|
||||
|
||||
// pick a random index
|
||||
randIndex := a.rand.Intn(len(bucket))
|
||||
|
||||
// loop over the map to return that index
|
||||
var selectedAddr *p2p.NetAddress
|
||||
for _, ka := range bucket {
|
||||
if randIndex == 0 {
|
||||
selectedAddr = ka.Addr
|
||||
break
|
||||
}
|
||||
randIndex--
|
||||
}
|
||||
|
||||
// if we have selected the address before, restart the loop
|
||||
// otherwise, record it and continue
|
||||
if pickFromOldBucket {
|
||||
if addrsMap, ok := oldBucketToAddrsMap[oldIndex]; ok {
|
||||
if _, ok = addrsMap[selectedAddr.String()]; ok {
|
||||
continue ADDRS_LOOP
|
||||
}
|
||||
} else {
|
||||
oldBucketToAddrsMap[oldIndex] = make(map[string]struct{})
|
||||
}
|
||||
oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{}
|
||||
} else {
|
||||
if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok {
|
||||
if _, ok = addrsMap[selectedAddr.String()]; ok {
|
||||
continue ADDRS_LOOP
|
||||
}
|
||||
} else {
|
||||
newBucketToAddrsMap[newIndex] = make(map[string]struct{})
|
||||
}
|
||||
newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{}
|
||||
}
|
||||
|
||||
selection[selectionIndex] = selectedAddr
|
||||
selectionIndex++
|
||||
}
|
||||
|
||||
return selection
|
||||
}
|
||||
|
||||
// ListOfKnownAddresses returns the new and old addresses.
|
||||
func (a *addrBook) ListOfKnownAddresses() []*knownAddress {
|
||||
a.mtx.Lock()
|
||||
|
@ -157,6 +157,13 @@ func TestAddrBookPromoteToOld(t *testing.T) {
|
||||
t.Errorf("selection could not be bigger than the book")
|
||||
}
|
||||
|
||||
selection = book.GetSelectionWithBias(30)
|
||||
t.Logf("selection: %v", selection)
|
||||
|
||||
if len(selection) > book.Size() {
|
||||
t.Errorf("selection with bias could not be bigger than the book")
|
||||
}
|
||||
|
||||
assert.Equal(t, book.Size(), 100, "expecting book size to be 100")
|
||||
}
|
||||
|
||||
@ -229,3 +236,121 @@ func TestAddrBookRemoveAddress(t *testing.T) {
|
||||
book.RemoveAddress(nonExistingAddr)
|
||||
assert.Equal(t, 0, book.Size())
|
||||
}
|
||||
|
||||
func TestAddrBookGetSelection(t *testing.T) {
|
||||
fname := createTempFileName("addrbook_test")
|
||||
defer deleteTempFile(fname)
|
||||
|
||||
book := NewAddrBook(fname, true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
// 1) empty book
|
||||
assert.Empty(t, book.GetSelection())
|
||||
|
||||
// 2) add one address
|
||||
addr := randIPv4Address(t)
|
||||
book.AddAddress(addr, addr)
|
||||
|
||||
assert.Equal(t, 1, len(book.GetSelection()))
|
||||
assert.Equal(t, addr, book.GetSelection()[0])
|
||||
|
||||
// 3) add a bunch of addresses
|
||||
randAddrs := randNetAddressPairs(t, 100)
|
||||
for _, addrSrc := range randAddrs {
|
||||
book.AddAddress(addrSrc.addr, addrSrc.src)
|
||||
}
|
||||
|
||||
// check there are no duplicates
addrs := make(map[string]*p2p.NetAddress)
|
||||
selection := book.GetSelection()
|
||||
for _, addr := range selection {
|
||||
if dup, ok := addrs[addr.String()]; ok {
|
||||
t.Fatalf("selection %v contains duplicates %v", selection, dup)
|
||||
}
|
||||
addrs[addr.String()] = addr
|
||||
}
|
||||
|
||||
if len(selection) > book.Size() {
|
||||
t.Errorf("selection %v could not be bigger than the book", selection)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddrBookGetSelectionWithBias(t *testing.T) {
|
||||
const biasTowardsNewAddrs = 30
|
||||
|
||||
fname := createTempFileName("addrbook_test")
|
||||
defer deleteTempFile(fname)
|
||||
|
||||
book := NewAddrBook(fname, true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
// 1) empty book
|
||||
selection := book.GetSelectionWithBias(biasTowardsNewAddrs)
|
||||
assert.Empty(t, selection)
|
||||
|
||||
// 2) add one address
|
||||
addr := randIPv4Address(t)
|
||||
book.AddAddress(addr, addr)
|
||||
|
||||
selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
|
||||
assert.Equal(t, 1, len(selection))
|
||||
assert.Equal(t, addr, selection[0])
|
||||
|
||||
// 3) add a bunch of addresses
|
||||
randAddrs := randNetAddressPairs(t, 100)
|
||||
for _, addrSrc := range randAddrs {
|
||||
book.AddAddress(addrSrc.addr, addrSrc.src)
|
||||
}
|
||||
|
||||
// check there are no duplicates
addrs := make(map[string]*p2p.NetAddress)
|
||||
selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
|
||||
for _, addr := range selection {
|
||||
if dup, ok := addrs[addr.String()]; ok {
|
||||
t.Fatalf("selection %v contains duplicates %v", selection, dup)
|
||||
}
|
||||
addrs[addr.String()] = addr
|
||||
}
|
||||
|
||||
if len(selection) > book.Size() {
|
||||
t.Fatalf("selection %v could not be bigger than the book", selection)
|
||||
}
|
||||
|
||||
// 4) mark 80% of the addresses as good
|
||||
randAddrsLen := len(randAddrs)
|
||||
for i, addrSrc := range randAddrs {
|
||||
if int((float64(i)/float64(randAddrsLen))*100) >= 20 {
|
||||
book.MarkGood(addrSrc.addr)
|
||||
}
|
||||
}
|
||||
|
||||
selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
|
||||
|
||||
// check that ~70% of addresses returned are good
|
||||
good := 0
|
||||
for _, addr := range selection {
|
||||
if book.IsGood(addr) {
|
||||
good++
|
||||
}
|
||||
}
|
||||
got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs)
|
||||
if got >= expected {
|
||||
t.Fatalf("expected more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
|
||||
}
|
||||
}
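
The assertion above expects roughly 70% of the returned addresses to be good when the bias toward new addresses is 30. As a rough standalone sketch of how such a percentage could translate into counts (hypothetical helper, not code from this commit):

package main

import "fmt"

// splitSelection is a hypothetical helper: given the total number of
// addresses to return and a bias percentage toward new addresses, it
// computes how many new and how many old (good) addresses to pick.
func splitSelection(total, biasTowardsNewAddrs int) (numNew, numOld int) {
	if biasTowardsNewAddrs > 100 {
		biasTowardsNewAddrs = 100
	} else if biasTowardsNewAddrs < 0 {
		biasTowardsNewAddrs = 0
	}
	numNew = total * biasTowardsNewAddrs / 100
	numOld = total - numNew
	return numNew, numOld
}

func main() {
	// with a 30% bias toward new addresses, roughly 70% of a
	// 100-address selection should come from good addresses
	numNew, numOld := splitSelection(100, 30)
	fmt.Println(numNew, numOld) // 30 70
}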
|
||||
|
||||
func TestAddrBookHasAddress(t *testing.T) {
|
||||
fname := createTempFileName("addrbook_test")
|
||||
defer deleteTempFile(fname)
|
||||
|
||||
book := NewAddrBook(fname, true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
addr := randIPv4Address(t)
|
||||
book.AddAddress(addr, addr)
|
||||
|
||||
assert.True(t, book.HasAddress(addr))
|
||||
|
||||
book.RemoveAddress(addr)
|
||||
|
||||
assert.False(t, book.HasAddress(addr))
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ func (ka *knownAddress) isNew() bool {
|
||||
func (ka *knownAddress) markAttempt() {
|
||||
now := time.Now()
|
||||
ka.LastAttempt = now
|
||||
ka.Attempts += 1
|
||||
ka.Attempts++
|
||||
}
|
||||
|
||||
func (ka *knownAddress) markGood() {
|
||||
|
@ -28,17 +28,24 @@ const (
|
||||
defaultMinNumOutboundPeers = 10
|
||||
|
||||
// Seed/Crawler constants
|
||||
// TODO:
|
||||
// We want seeds to only advertise good peers.
|
||||
// Peers are marked by external mechanisms.
|
||||
// We need a config value that can be set to be
|
||||
// on the order of how long it would take before a good
|
||||
// peer is marked good.
|
||||
defaultSeedDisconnectWaitPeriod = 2 * time.Minute // disconnect after this
|
||||
defaultCrawlPeerInterval = 2 * time.Minute // dont redial for this. TODO: back-off
|
||||
defaultCrawlPeersPeriod = 30 * time.Second // check some peers every this
|
||||
|
||||
// We want seeds to only advertise good peers. Therefore they should wait at
|
||||
// least as long as we expect it to take for a peer to become good before
|
||||
// disconnecting.
|
||||
// see consensus/reactor.go: blocksToContributeToBecomeGoodPeer
|
||||
// 10000 blocks assuming 1s blocks ~ 2.7 hours.
|
||||
defaultSeedDisconnectWaitPeriod = 3 * time.Hour
|
||||
|
||||
defaultCrawlPeerInterval = 2 * time.Minute // don't redial for this. TODO: back-off. what for?
|
||||
|
||||
defaultCrawlPeersPeriod = 30 * time.Second // check some peers every this
|
||||
|
||||
maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
|
||||
|
||||
// if node connects to seed, it does not have any trusted peers.
|
||||
// Especially in the beginning, node should have more trusted peers than
|
||||
// untrusted.
|
||||
biasToSelectNewPeers = 30 // 70 to select good peers
|
||||
)
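
The figures in the comments above work out with simple arithmetic, assuming the dial back-off roughly doubles each attempt (an assumption, not something stated in this hunk): 10000 one-second blocks is 10000s, about 2.8 hours, hence the 3 hour disconnect wait; and a back-off near 2^n seconds gives about 18.2 hours for the 16th attempt and about 36 hours in total, close to the "~ 35h in total (last attempt - 18h)" note next to maxAttemptsToDial. A quick check:

package main

import (
	"fmt"
	"time"
)

func main() {
	// assumed back-off of roughly 2^n seconds per dial attempt (illustration only)
	var last, total time.Duration
	for n := 1; n <= 16; n++ {
		last = time.Duration(1<<uint(n)) * time.Second
		total += last
	}
	fmt.Println(last)                // 18h12m16s for the 16th attempt
	fmt.Println(total)               // 36h24m30s in total
	fmt.Println(10000 * time.Second) // 2h46m40s for 10000 one-second blocks
}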
|
||||
|
||||
// PEXReactor handles PEX (peer exchange) and ensures that an
|
||||
@ -72,6 +79,10 @@ type PEXReactorConfig struct {
|
||||
// Seeds is a list of addresses reactor may use
|
||||
// if it can't connect to peers in the addrbook.
|
||||
Seeds []string
|
||||
|
||||
// PrivatePeerIDs is a list of peer IDs, which must not be gossiped to other
|
||||
// peers.
|
||||
PrivatePeerIDs []string
|
||||
}
|
||||
|
||||
type _attemptsToDial struct {
|
||||
@ -150,7 +161,12 @@ func (r *PEXReactor) AddPeer(p Peer) {
|
||||
// Let the ensurePeersRoutine handle asking for more
|
||||
// peers when we need - we don't trust inbound peers as much.
|
||||
addr := p.NodeInfo().NetAddress()
|
||||
r.book.AddAddress(addr, addr)
|
||||
if !isAddrPrivate(addr, r.config.PrivatePeerIDs) {
|
||||
err := r.book.AddAddress(addr, addr)
|
||||
if err != nil {
|
||||
r.Logger.Error("Failed to add new address", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -181,8 +197,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
|
||||
|
||||
// Seeds disconnect after sending a batch of addrs
|
||||
if r.config.SeedMode {
|
||||
// TODO: should we be more selective ?
|
||||
r.SendAddrs(src, r.book.GetSelection())
|
||||
r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
|
||||
r.Switch.StopPeerGracefully(src)
|
||||
} else {
|
||||
r.SendAddrs(src, r.book.GetSelection())
|
||||
@ -250,8 +265,11 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
|
||||
|
||||
srcAddr := src.NodeInfo().NetAddress()
|
||||
for _, netAddr := range addrs {
|
||||
if netAddr != nil {
|
||||
r.book.AddAddress(netAddr, srcAddr)
|
||||
if netAddr != nil && !isAddrPrivate(netAddr, r.config.PrivatePeerIDs) {
|
||||
err := r.book.AddAddress(netAddr, srcAddr)
|
||||
if err != nil {
|
||||
r.Logger.Error("Failed to add new address", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -401,11 +419,15 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
|
||||
// TODO: detect more "bad peer" scenarios
|
||||
if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok {
|
||||
r.book.MarkBad(addr)
|
||||
r.attemptsToDial.Delete(addr.DialString())
|
||||
} else {
|
||||
r.book.MarkAttempt(addr)
|
||||
// FIXME: if the addr is going to be removed from the addrbook (hard to
|
||||
// tell at this point), we need to Delete it from attemptsToDial, not
|
||||
// record another attempt.
|
||||
// record attempt
|
||||
r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
|
||||
}
|
||||
// record attempt
|
||||
r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
|
||||
} else {
|
||||
// cleanup any history
|
||||
r.attemptsToDial.Delete(addr.DialString())
|
||||
@ -455,9 +477,8 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int {
|
||||
lAttempts, attempted := r.attemptsToDial.Load(addr.DialString())
|
||||
if attempted {
|
||||
return lAttempts.(_attemptsToDial).number
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
//----------------------------------------------------------
|
||||
@ -551,24 +572,16 @@ func (r *PEXReactor) crawlPeers() {
|
||||
r.book.MarkAttempt(pi.Addr)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Crawl the connected peers asking for more addresses
|
||||
for _, pi := range peerInfos {
|
||||
// We will wait a minimum period of time before crawling peers again
|
||||
if now.Sub(pi.LastAttempt) >= defaultCrawlPeerInterval {
|
||||
peer := r.Switch.Peers().Get(pi.Addr.ID)
|
||||
if peer != nil {
|
||||
r.RequestAddrs(peer)
|
||||
}
|
||||
}
|
||||
// Ask for more addresses
|
||||
peer := r.Switch.Peers().Get(pi.Addr.ID)
|
||||
r.RequestAddrs(peer)
|
||||
}
|
||||
}
|
||||
|
||||
// attemptDisconnects checks if we've been with each peer long enough to disconnect
|
||||
func (r *PEXReactor) attemptDisconnects() {
|
||||
for _, peer := range r.Switch.Peers().List() {
|
||||
status := peer.Status()
|
||||
if status.Duration < defaultSeedDisconnectWaitPeriod {
|
||||
if peer.Status().Duration < defaultSeedDisconnectWaitPeriod {
|
||||
continue
|
||||
}
|
||||
if peer.IsPersistent() {
|
||||
@ -578,6 +591,16 @@ func (r *PEXReactor) attemptDisconnects() {
|
||||
}
|
||||
}
|
||||
|
||||
// isAddrPrivate returns true if addr is private.
|
||||
func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool {
|
||||
for _, id := range privatePeerIDs {
|
||||
if string(addr.ID) == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Messages
|
||||
|
||||
|
@ -62,35 +62,45 @@ func TestPEXReactorRunning(t *testing.T) {
|
||||
N := 3
|
||||
switches := make([]*p2p.Switch, N)
|
||||
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir) // nolint: errcheck
|
||||
book := NewAddrBook(filepath.Join(dir, "addrbook.json"), false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
books := make([]*addrBook, N)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
// create switches
|
||||
for i := 0; i < N; i++ {
|
||||
switches[i] = p2p.MakeSwitch(config, i, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
|
||||
sw.SetLogger(log.TestingLogger().With("switch", i))
|
||||
books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
|
||||
books[i].SetLogger(logger.With("pex", i))
|
||||
sw.SetAddrBook(books[i])
|
||||
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
sw.SetLogger(logger.With("pex", i))
|
||||
|
||||
r := NewPEXReactor(books[i], &PEXReactorConfig{})
|
||||
r.SetLogger(logger.With("pex", i))
|
||||
r.SetEnsurePeersPeriod(250 * time.Millisecond)
|
||||
sw.AddReactor("pex", r)
|
||||
|
||||
return sw
|
||||
})
|
||||
}
|
||||
|
||||
// fill the address book and add listeners
|
||||
for _, s := range switches {
|
||||
addr := s.NodeInfo().NetAddress()
|
||||
book.AddAddress(addr, addr)
|
||||
s.AddListener(p2p.NewDefaultListener("tcp", s.NodeInfo().ListenAddr, true, log.TestingLogger()))
|
||||
addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) {
|
||||
addr := switches[otherSwitchIndex].NodeInfo().NetAddress()
|
||||
books[switchIndex].AddAddress(addr, addr)
|
||||
}
|
||||
|
||||
// start switches
|
||||
for _, s := range switches {
|
||||
err := s.Start() // start switch and reactors
|
||||
addOtherNodeAddrToAddrBook(0, 1)
|
||||
addOtherNodeAddrToAddrBook(1, 0)
|
||||
addOtherNodeAddrToAddrBook(2, 1)
|
||||
|
||||
for i, sw := range switches {
|
||||
sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i)))
|
||||
|
||||
err := sw.Start() // start switch and reactors
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
@ -126,6 +136,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
|
||||
defer teardownReactor(book)
|
||||
|
||||
sw := createSwitchAndAddReactors(r)
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
peer := newMockPeer()
|
||||
p2p.AddPeerToSwitch(sw, peer)
|
||||
@ -155,6 +166,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
|
||||
defer teardownReactor(book)
|
||||
|
||||
sw := createSwitchAndAddReactors(r)
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
peer := newMockPeer()
|
||||
p2p.AddPeerToSwitch(sw, peer)
|
||||
@ -181,13 +193,11 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
// directory to store address books
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir) // nolint: errcheck
|
||||
|
||||
book := NewAddrBook(filepath.Join(dir, "addrbook.json"), false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
// 1. create seed
|
||||
seed := p2p.MakeSwitch(
|
||||
config,
|
||||
@ -195,6 +205,10 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
"127.0.0.1",
|
||||
"123.123.123",
|
||||
func(i int, sw *p2p.Switch) *p2p.Switch {
|
||||
book := NewAddrBook(filepath.Join(dir, "addrbook0.json"), false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
sw.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
@ -221,6 +235,10 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
"127.0.0.1",
|
||||
"123.123.123",
|
||||
func(i int, sw *p2p.Switch) *p2p.Switch {
|
||||
book := NewAddrBook(filepath.Join(dir, "addrbook1.json"), false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
sw.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(
|
||||
@ -246,7 +264,8 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
|
||||
defer teardownReactor(book)
|
||||
|
||||
// Seed/Crawler mode uses data from the Switch
|
||||
_ = createSwitchAndAddReactors(pexR)
|
||||
sw := createSwitchAndAddReactors(pexR)
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
// Create a peer, add it to the peer set and the addrbook.
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
@ -267,11 +286,31 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
|
||||
// TODO: test
|
||||
}
|
||||
|
||||
func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
|
||||
peer := p2p.CreateRandomPeer(false)
|
||||
|
||||
pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID())}})
|
||||
defer teardownReactor(book)
|
||||
|
||||
// we have to send a request to receive responses
|
||||
pexR.RequestAddrs(peer)
|
||||
|
||||
size := book.Size()
|
||||
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
|
||||
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
|
||||
pexR.Receive(PexChannel, peer, msg)
|
||||
assert.Equal(t, size, book.Size())
|
||||
|
||||
pexR.AddPeer(peer)
|
||||
assert.Equal(t, size, book.Size())
|
||||
}
|
||||
|
||||
func TestPEXReactorDialPeer(t *testing.T) {
|
||||
pexR, book := createReactor(&PEXReactorConfig{})
|
||||
defer teardownReactor(book)
|
||||
|
||||
_ = createSwitchAndAddReactors(pexR)
|
||||
sw := createSwitchAndAddReactors(pexR)
|
||||
sw.SetAddrBook(book)
|
||||
|
||||
peer := newMockPeer()
|
||||
addr := peer.NodeInfo().NetAddress()
|
||||
@ -288,6 +327,15 @@ func TestPEXReactorDialPeer(t *testing.T) {
|
||||
|
||||
// must be skipped because it is too early
|
||||
assert.Equal(t, 1, pexR.AttemptsToDial(addr))
|
||||
|
||||
if !testing.Short() {
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// 3rd attempt
|
||||
pexR.dialPeer(addr)
|
||||
|
||||
assert.Equal(t, 2, pexR.AttemptsToDial(addr))
|
||||
}
|
||||
}
|
||||
|
||||
type mockPeer struct {
|
||||
@ -368,6 +416,7 @@ func assertPeersWithTimeout(
|
||||
}
|
||||
|
||||
func createReactor(config *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
|
||||
// directory to store address book
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -375,7 +424,7 @@ func createReactor(config *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
|
||||
book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
r = NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r = NewPEXReactor(book, config)
|
||||
r.SetLogger(log.TestingLogger())
|
||||
return
|
||||
}
|
||||
|
@ -31,15 +31,21 @@ const (
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// An AddrBook represents an address book from the pex package, which is used
|
||||
// to store peer addresses.
|
||||
type AddrBook interface {
|
||||
AddAddress(addr *NetAddress, src *NetAddress) error
|
||||
AddOurAddress(*NetAddress)
|
||||
OurAddress(*NetAddress) bool
|
||||
MarkGood(*NetAddress)
|
||||
RemoveAddress(*NetAddress)
|
||||
HasAddress(*NetAddress) bool
|
||||
Save()
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// `Switch` handles peer connections and exposes an API to receive incoming messages
|
||||
// Switch handles peer connections and exposes an API to receive incoming messages
|
||||
// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
|
||||
// or more `Channels`. So while sending outgoing messages is typically performed on the peer,
|
||||
// incoming messages are received on the reactor.
|
||||
@ -64,6 +70,7 @@ type Switch struct {
|
||||
rng *rand.Rand // seed for randomizing dial times and orders
|
||||
}
|
||||
|
||||
// NewSwitch creates a new Switch with the given config.
|
||||
func NewSwitch(config *cfg.P2PConfig) *Switch {
|
||||
sw := &Switch{
|
||||
config: config,
|
||||
@ -341,20 +348,21 @@ func (sw *Switch) IsDialing(id ID) bool {
|
||||
// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
|
||||
func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error {
|
||||
netAddrs, errs := NewNetAddressStrings(peers)
|
||||
// only log errors, dial correct addresses
|
||||
for _, err := range errs {
|
||||
sw.Logger.Error("Error in peer's address", "err", err)
|
||||
}
|
||||
|
||||
ourAddr := sw.nodeInfo.NetAddress()
|
||||
|
||||
// TODO: move this out of here ?
|
||||
if addrBook != nil {
|
||||
// add peers to `addrBook`
|
||||
ourAddr := sw.nodeInfo.NetAddress()
|
||||
for _, netAddr := range netAddrs {
|
||||
// do not add our address or ID
|
||||
if netAddr.Same(ourAddr) {
|
||||
continue
|
||||
if !netAddr.Same(ourAddr) {
|
||||
addrBook.AddAddress(netAddr, ourAddr)
|
||||
}
|
||||
// TODO: move this out of here ?
|
||||
addrBook.AddAddress(netAddr, ourAddr)
|
||||
}
|
||||
// Persist some peers to disk right away.
|
||||
// NOTE: integration tests depend on this
|
||||
@ -365,8 +373,14 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
|
||||
perm := sw.rng.Perm(len(netAddrs))
|
||||
for i := 0; i < len(perm); i++ {
|
||||
go func(i int) {
|
||||
sw.randomSleep(0)
|
||||
j := perm[i]
|
||||
|
||||
// do not dial ourselves
|
||||
if netAddrs[j].Same(ourAddr) {
|
||||
return
|
||||
}
|
||||
|
||||
sw.randomSleep(0)
|
||||
err := sw.DialPeerWithAddress(netAddrs[j], persistent)
|
||||
if err != nil {
|
||||
sw.Logger.Error("Error dialing peer", "err", err)
|
||||
@ -520,6 +534,15 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
|
||||
// Avoid self
|
||||
if sw.nodeKey.ID() == peerID {
|
||||
addr := peerNodeInfo.NetAddress()
|
||||
|
||||
// remove the given address from the address book if we added it earlier
|
||||
sw.addrBook.RemoveAddress(addr)
|
||||
|
||||
// add the given address to the address book to avoid dialing ourselves
|
||||
// again, since this is our public address
|
||||
sw.addrBook.AddOurAddress(addr)
|
||||
|
||||
return ErrSwitchConnectToSelf
|
||||
}
|
||||
|
||||
@ -545,7 +568,9 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
|
||||
// All good. Start peer
|
||||
if sw.IsRunning() {
|
||||
sw.startInitPeer(peer)
|
||||
if err = sw.startInitPeer(peer); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the peer to .peers.
|
||||
@ -559,14 +584,17 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sw *Switch) startInitPeer(peer *peer) {
|
||||
func (sw *Switch) startInitPeer(peer *peer) error {
|
||||
err := peer.Start() // spawn send/recv routines
|
||||
if err != nil {
|
||||
// Should never happen
|
||||
sw.Logger.Error("Error starting peer", "peer", peer, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, reactor := range sw.reactors {
|
||||
reactor.AddPeer(peer)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -38,8 +38,6 @@ type TestReactor struct {
|
||||
|
||||
mtx sync.Mutex
|
||||
channels []*conn.ChannelDescriptor
|
||||
peersAdded []Peer
|
||||
peersRemoved []Peer
|
||||
logMessages bool
|
||||
msgsCounter int
|
||||
msgsReceived map[byte][]PeerMessage
|
||||
@ -60,17 +58,9 @@ func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor {
|
||||
return tr.channels
|
||||
}
|
||||
|
||||
func (tr *TestReactor) AddPeer(peer Peer) {
|
||||
tr.mtx.Lock()
|
||||
defer tr.mtx.Unlock()
|
||||
tr.peersAdded = append(tr.peersAdded, peer)
|
||||
}
|
||||
func (tr *TestReactor) AddPeer(peer Peer) {}
|
||||
|
||||
func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {
|
||||
tr.mtx.Lock()
|
||||
defer tr.mtx.Unlock()
|
||||
tr.peersRemoved = append(tr.peersRemoved, peer)
|
||||
}
|
||||
func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}
|
||||
|
||||
func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
|
||||
if tr.logMessages {
|
||||
@ -99,6 +89,10 @@ func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc
|
||||
}
|
||||
|
||||
func initSwitchFunc(i int, sw *Switch) *Switch {
|
||||
sw.SetAddrBook(&addrBookMock{
|
||||
addrs: make(map[string]struct{}),
|
||||
ourAddrs: make(map[string]struct{})})
|
||||
|
||||
// Make two reactors of two channels each
|
||||
sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
|
||||
{ID: byte(0x00), Priority: 10},
|
||||
@ -108,6 +102,7 @@ func initSwitchFunc(i int, sw *Switch) *Switch {
|
||||
{ID: byte(0x02), Priority: 10},
|
||||
{ID: byte(0x03), Priority: 10},
|
||||
}, true))
|
||||
|
||||
return sw
|
||||
}
|
||||
|
||||
@ -184,6 +179,32 @@ func TestConnAddrFilter(t *testing.T) {
|
||||
assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
|
||||
}
|
||||
|
||||
func TestSwitchFiltersOutItself(t *testing.T) {
|
||||
s1 := MakeSwitch(config, 1, "127.0.0.2", "123.123.123", initSwitchFunc)
|
||||
// addr := s1.NodeInfo().NetAddress()
|
||||
|
||||
// // add ourselves like we do in node.go#427
|
||||
// s1.addrBook.AddOurAddress(addr)
|
||||
|
||||
// simulate s1 having a public IP by creating a remote peer with the same ID
|
||||
rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: DefaultPeerConfig()}
|
||||
rp.Start()
|
||||
|
||||
// addr should be rejected in addPeer based on the same ID
|
||||
err := s1.DialPeerWithAddress(rp.Addr(), false)
|
||||
if assert.Error(t, err) {
|
||||
assert.Equal(t, ErrSwitchConnectToSelf, err)
|
||||
}
|
||||
|
||||
assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
|
||||
|
||||
assert.False(t, s1.addrBook.HasAddress(rp.Addr()))
|
||||
|
||||
rp.Stop()
|
||||
|
||||
assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
|
||||
}
|
||||
|
||||
func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
|
||||
time.Sleep(timeout)
|
||||
if sw.Peers().Size() != 0 {
|
||||
@ -349,3 +370,29 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
|
||||
|
||||
b.Logf("success: %v, failure: %v", numSuccess, numFailure)
|
||||
}
|
||||
|
||||
type addrBookMock struct {
|
||||
addrs map[string]struct{}
|
||||
ourAddrs map[string]struct{}
|
||||
}
|
||||
|
||||
var _ AddrBook = (*addrBookMock)(nil)
|
||||
|
||||
func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
|
||||
book.addrs[addr.String()] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }
|
||||
func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
|
||||
_, ok := book.ourAddrs[addr.String()]
|
||||
return ok
|
||||
}
|
||||
func (book *addrBookMock) MarkGood(*NetAddress) {}
|
||||
func (book *addrBookMock) HasAddress(addr *NetAddress) bool {
|
||||
_, ok := book.addrs[addr.String()]
|
||||
return ok
|
||||
}
|
||||
func (book *addrBookMock) RemoveAddress(addr *NetAddress) {
|
||||
delete(book.addrs, addr.String())
|
||||
}
|
||||
func (book *addrBookMock) Save() {}
|
||||
|
@ -143,7 +143,7 @@ func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
|
||||
Version: version,
|
||||
ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
|
||||
}
|
||||
for ch, _ := range sw.reactorsByCh {
|
||||
for ch := range sw.reactorsByCh {
|
||||
ni.Channels = append(ni.Channels, ch)
|
||||
}
|
||||
sw.SetNodeInfo(ni)
|
||||
|
@ -103,7 +103,7 @@ func Discover() (nat NAT, err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = errors.New("UPnP port discovery failed.")
|
||||
err = errors.New("UPnP port discovery failed")
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -126,6 +126,15 @@ func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *HTTP) Health() (*ctypes.ResultHealth, error) {
|
||||
result := new(ctypes.ResultHealth)
|
||||
_, err := c.rpc.Call("health", map[string]interface{}{}, result)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Health")
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
|
||||
result := new(ctypes.ResultBlockchainInfo)
|
||||
_, err := c.rpc.Call("blockchain",
|
||||
|
@ -83,6 +83,7 @@ type Client interface {
|
||||
type NetworkClient interface {
|
||||
NetInfo() (*ctypes.ResultNetInfo, error)
|
||||
DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
|
||||
Health() (*ctypes.ResultHealth, error)
|
||||
}
|
||||
|
||||
// EventsClient is reactive, you can subscribe to any message, given the proper
|
||||
|
@ -84,6 +84,10 @@ func (Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
|
||||
return core.DumpConsensusState()
|
||||
}
|
||||
|
||||
func (Local) Health() (*ctypes.ResultHealth, error) {
|
||||
return core.Health()
|
||||
}
|
||||
|
||||
func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
|
||||
return core.UnsafeDialSeeds(seeds)
|
||||
}
|
||||
|
@ -78,6 +78,15 @@ func TestDumpConsensusState(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHealth(t *testing.T) {
|
||||
for i, c := range GetClients() {
|
||||
nc, ok := c.(client.NetworkClient)
|
||||
require.True(t, ok, "%d", i)
|
||||
_, err := nc.Health()
|
||||
require.Nil(t, err, "%d: %+v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenesisAndValidators(t *testing.T) {
|
||||
for i, c := range GetClients() {
|
||||
|
||||
@ -244,13 +253,11 @@ func TestBroadcastTxCommit(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTx(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
// first we broadcast a tx
|
||||
c := getHTTPClient()
|
||||
_, _, tx := MakeTxKV()
|
||||
bres, err := c.BroadcastTxCommit(tx)
|
||||
require.Nil(err, "%+v", err)
|
||||
require.Nil(t, err, "%+v", err)
|
||||
|
||||
txHeight := bres.Height
|
||||
txHash := bres.Hash
|
||||
@ -280,18 +287,19 @@ func TestTx(t *testing.T) {
|
||||
ptx, err := c.Tx(tc.hash, tc.prove)
|
||||
|
||||
if !tc.valid {
|
||||
require.NotNil(err)
|
||||
require.NotNil(t, err)
|
||||
} else {
|
||||
require.Nil(err, "%+v", err)
|
||||
assert.EqualValues(txHeight, ptx.Height)
|
||||
assert.EqualValues(tx, ptx.Tx)
|
||||
assert.Zero(ptx.Index)
|
||||
assert.True(ptx.TxResult.IsOK())
|
||||
require.Nil(t, err, "%+v", err)
|
||||
assert.EqualValues(t, txHeight, ptx.Height)
|
||||
assert.EqualValues(t, tx, ptx.Tx)
|
||||
assert.Zero(t, ptx.Index)
|
||||
assert.True(t, ptx.TxResult.IsOK())
|
||||
assert.EqualValues(t, txHash, ptx.Hash)
|
||||
|
||||
// time to verify the proof
|
||||
proof := ptx.Proof
|
||||
if tc.prove && assert.EqualValues(tx, proof.Data) {
|
||||
assert.True(proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash))
|
||||
if tc.prove && assert.EqualValues(t, tx, proof.Data) {
|
||||
assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -324,6 +332,7 @@ func TestTxSearch(t *testing.T) {
|
||||
assert.EqualValues(t, tx, ptx.Tx)
|
||||
assert.Zero(t, ptx.Index)
|
||||
assert.True(t, ptx.TxResult.IsOK())
|
||||
assert.EqualValues(t, txHash, ptx.Hash)
|
||||
|
||||
// time to verify the proof
|
||||
proof := ptx.Proof
|
||||
|
@ -81,6 +81,7 @@ Available endpoints:
|
||||
/net_info
|
||||
/num_unconfirmed_txs
|
||||
/status
|
||||
/health
|
||||
/unconfirmed_txs
|
||||
/unsafe_flush_mempool
|
||||
/unsafe_stop_cpu_profiler
|
||||
|
31
rpc/core/health.go
Normal file
@ -0,0 +1,31 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
)
|
||||
|
||||
// Get node health. Returns empty result (200 OK) on success, no response - in
|
||||
// case of an error.
|
||||
//
|
||||
// ```shell
|
||||
// curl 'localhost:46657/health'
|
||||
// ```
|
||||
//
|
||||
// ```go
|
||||
// client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")
|
||||
// result, err := client.Health()
|
||||
// ```
|
||||
//
|
||||
// > The above command returns JSON structured like this:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "error": "",
|
||||
// "result": {},
|
||||
// "id": "",
|
||||
// "jsonrpc": "2.0"
|
||||
// }
|
||||
// ```
|
||||
func Health() (*ctypes.ResultHealth, error) {
|
||||
return &ctypes.ResultHealth{}, nil
|
||||
}
|
@ -2,6 +2,7 @@ package core
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
)
|
||||
|
||||
@ -42,6 +43,7 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
|
||||
for _, peer := range p2pSwitch.Peers().List() {
|
||||
peers = append(peers, ctypes.Peer{
|
||||
NodeInfo: peer.NodeInfo(),
|
||||
ID: peer.ID(),
|
||||
IsOutbound: peer.IsOutbound(),
|
||||
ConnectionStatus: peer.Status(),
|
||||
})
|
||||
|
@ -3,10 +3,10 @@ package core
|
||||
import (
|
||||
"time"
|
||||
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/tendermint/consensus"
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
p2p "github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/state/txindex"
|
||||
|
@ -13,6 +13,7 @@ var Routes = map[string]*rpc.RPCFunc{
|
||||
"unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""),
|
||||
|
||||
// info API
|
||||
"health": rpc.NewRPCFunc(Health, ""),
|
||||
"status": rpc.NewRPCFunc(Status, ""),
|
||||
"net_info": rpc.NewRPCFunc(NetInfo, ""),
|
||||
"blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"),
|
||||
|
@ -1,9 +1,11 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"time"
|
||||
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
@ -48,7 +50,10 @@ import (
|
||||
// "remote_addr": "",
|
||||
// "network": "test-chain-qhVCa2",
|
||||
// "moniker": "vagrant-ubuntu-trusty-64",
|
||||
// "pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6"
|
||||
// "pub_key": "844981FE99ABB19F7816F2D5E94E8A74276AB1153760A7799E925C75401856C6",
|
||||
// "validator_status": {
|
||||
// "voting_power": 10
|
||||
// }
|
||||
// }
|
||||
// },
|
||||
// "id": "",
|
||||
@ -72,12 +77,50 @@ func Status() (*ctypes.ResultStatus, error) {
|
||||
|
||||
latestBlockTime := time.Unix(0, latestBlockTimeNano)
|
||||
|
||||
return &ctypes.ResultStatus{
|
||||
result := &ctypes.ResultStatus{
|
||||
NodeInfo: p2pSwitch.NodeInfo(),
|
||||
PubKey: pubKey,
|
||||
LatestBlockHash: latestBlockHash,
|
||||
LatestAppHash: latestAppHash,
|
||||
LatestBlockHeight: latestHeight,
|
||||
LatestBlockTime: latestBlockTime,
|
||||
Syncing: consensusReactor.FastSync()}, nil
|
||||
Syncing: consensusReactor.FastSync(),
|
||||
}
|
||||
|
||||
// add ValidatorStatus if node is a validator
|
||||
if val := validatorAtHeight(latestHeight); val != nil {
|
||||
result.ValidatorStatus = ctypes.ValidatorStatus{
|
||||
VotingPower: val.VotingPower,
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func validatorAtHeight(h int64) *types.Validator {
|
||||
lastBlockHeight, vals := consensusState.GetValidators()
|
||||
|
||||
privValAddress := pubKey.Address()
|
||||
|
||||
// if we're still at height h, search in the current validator set
|
||||
if lastBlockHeight == h {
|
||||
for _, val := range vals {
|
||||
if bytes.Equal(val.Address, privValAddress) {
|
||||
return val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we've moved to the next height, retrieve the validator set from DB
|
||||
if lastBlockHeight > h {
|
||||
vals, err := sm.LoadValidators(stateDB, h)
|
||||
if err != nil {
|
||||
// should not happen
|
||||
return nil
|
||||
}
|
||||
_, val := vals.GetByAddress(privValAddress)
|
||||
return val
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -44,7 +44,8 @@ import (
|
||||
// "code": 0
|
||||
// },
|
||||
// "index": 0,
|
||||
// "height": 52
|
||||
// "height": 52,
|
||||
// "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
|
||||
// },
|
||||
// "id": "",
|
||||
// "jsonrpc": "2.0"
|
||||
@ -67,11 +68,12 @@ import (
|
||||
// - `tx_result`: the `abci.Result` object
|
||||
// - `index`: `int` - index of the transaction
|
||||
// - `height`: `int` - height of the block where this transaction was in
|
||||
// - `hash`: `[]byte` - hash of the transaction
|
||||
func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
|
||||
// if index is disabled, return error
|
||||
if _, ok := txIndexer.(*null.TxIndex); ok {
|
||||
return nil, fmt.Errorf("Transaction indexing is disabled.")
|
||||
return nil, fmt.Errorf("Transaction indexing is disabled")
|
||||
}
|
||||
|
||||
r, err := txIndexer.Get(hash)
|
||||
@ -93,6 +95,7 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
}
|
||||
|
||||
return &ctypes.ResultTx{
|
||||
Hash: hash,
|
||||
Height: height,
|
||||
Index: uint32(index),
|
||||
TxResult: r.Result,
|
||||
@ -137,7 +140,8 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
// "tx": "mvZHHa7HhZ4aRT0xMDA=",
|
||||
// "tx_result": {},
|
||||
// "index": 31,
|
||||
// "height": 12
|
||||
// "height": 12,
|
||||
// "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
|
||||
// }
|
||||
// ],
|
||||
// "id": "",
|
||||
@ -161,10 +165,11 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
// - `tx_result`: the `abci.Result` object
|
||||
// - `index`: `int` - index of the transaction
|
||||
// - `height`: `int` - height of the block where this transaction was in
|
||||
// - `hash`: `[]byte` - hash of the transaction
|
||||
func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) {
|
||||
// if index is disabled, return error
|
||||
if _, ok := txIndexer.(*null.TxIndex); ok {
|
||||
return nil, fmt.Errorf("Transaction indexing is disabled.")
|
||||
return nil, fmt.Errorf("Transaction indexing is disabled")
|
||||
}
|
||||
|
||||
q, err := tmquery.New(query)
|
||||
@ -191,6 +196,7 @@ func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) {
|
||||
}
|
||||
|
||||
apiResults[i] = &ctypes.ResultTx{
|
||||
Hash: r.Tx.Hash(),
|
||||
Height: height,
|
||||
Index: index,
|
||||
TxResult: r.Result,
|
||||
|
@ -54,14 +54,19 @@ func NewResultCommit(header *types.Header, commit *types.Commit,
|
||||
}
|
||||
}
|
||||
|
||||
type ValidatorStatus struct {
|
||||
VotingPower int64 `json:"voting_power"`
|
||||
}
|
||||
|
||||
type ResultStatus struct {
|
||||
NodeInfo p2p.NodeInfo `json:"node_info"`
|
||||
PubKey crypto.PubKey `json:"pub_key"`
|
||||
LatestBlockHash cmn.HexBytes `json:"latest_block_hash"`
|
||||
LatestAppHash cmn.HexBytes `json:"latest_app_hash"`
|
||||
LatestBlockHeight int64 `json:"latest_block_height"`
|
||||
LatestBlockTime time.Time `json:"latest_block_time"`
|
||||
Syncing bool `json:"syncing"`
|
||||
NodeInfo p2p.NodeInfo `json:"node_info"`
|
||||
PubKey crypto.PubKey `json:"pub_key"`
|
||||
LatestBlockHash cmn.HexBytes `json:"latest_block_hash"`
|
||||
LatestAppHash cmn.HexBytes `json:"latest_app_hash"`
|
||||
LatestBlockHeight int64 `json:"latest_block_height"`
|
||||
LatestBlockTime time.Time `json:"latest_block_time"`
|
||||
Syncing bool `json:"syncing"`
|
||||
ValidatorStatus ValidatorStatus `json:"validator_status,omitempty"`
|
||||
}
|
||||
|
||||
func (s *ResultStatus) TxIndexEnabled() bool {
|
||||
@ -93,6 +98,7 @@ type ResultDialPeers struct {
|
||||
|
||||
type Peer struct {
|
||||
p2p.NodeInfo `json:"node_info"`
|
||||
p2p.ID `json:"node_id"`
|
||||
IsOutbound bool `json:"is_outbound"`
|
||||
ConnectionStatus p2p.ConnectionStatus `json:"connection_status"`
|
||||
}
|
||||
@ -123,6 +129,7 @@ type ResultBroadcastTxCommit struct {
|
||||
}
|
||||
|
||||
type ResultTx struct {
|
||||
Hash cmn.HexBytes `json:"hash"`
|
||||
Height int64 `json:"height"`
|
||||
Index uint32 `json:"index"`
|
||||
TxResult abci.ResponseDeliverTx `json:"tx_result"`
|
||||
@ -155,3 +162,5 @@ type ResultEvent struct {
|
||||
Query string `json:"query"`
|
||||
Data types.TMEventData `json:"data"`
|
||||
}
|
||||
|
||||
type ResultHealth struct{}
|
||||
|
@ -329,21 +329,21 @@ func (c *WSClient) reconnectRoutine() {
|
||||
c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
|
||||
c.Stop()
|
||||
return
|
||||
} else {
|
||||
// drain reconnectAfter
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-c.reconnectAfter:
|
||||
default:
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
err = c.processBacklog()
|
||||
if err == nil {
|
||||
c.startReadWriteRoutines()
|
||||
}
|
||||
// drain reconnectAfter
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-c.reconnectAfter:
|
||||
default:
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
err := c.processBacklog()
|
||||
if err == nil {
|
||||
c.startReadWriteRoutines()
|
||||
}
|
||||
|
||||
case <-c.Quit():
|
||||
return
|
||||
}
|
||||
|
@ -18,32 +18,51 @@ import (
|
||||
)
|
||||
|
||||
func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) {
|
||||
// listenAddr should be fully formed including tcp:// or unix:// prefix
|
||||
var proto, addr string
|
||||
parts := strings.SplitN(listenAddr, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
logger.Error("WARNING (tendermint/rpc/lib): Please use fully formed listening addresses, including the tcp:// or unix:// prefix")
|
||||
// we used to allow addrs without tcp/unix prefix by checking for a colon
|
||||
// TODO: Deprecate
|
||||
proto = types.SocketType(listenAddr)
|
||||
addr = listenAddr
|
||||
// return nil, errors.Errorf("Invalid listener address %s", lisenAddr)
|
||||
} else {
|
||||
proto, addr = parts[0], parts[1]
|
||||
return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
|
||||
}
|
||||
proto, addr = parts[0], parts[1]
|
||||
|
||||
logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s socket %v", proto, addr))
|
||||
logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listenAddr))
|
||||
listener, err = net.Listen(proto, addr)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Failed to listen to %v: %v", listenAddr, err)
|
||||
return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
res := http.Serve(
|
||||
err := http.Serve(
|
||||
listener,
|
||||
RecoverAndLogHandler(handler, logger),
|
||||
)
|
||||
logger.Error("RPC HTTP server stopped", "result", res)
|
||||
logger.Error("RPC HTTP server stopped", "err", err)
|
||||
}()
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, keyFile string, logger log.Logger) (listener net.Listener, err error) {
|
||||
var proto, addr string
|
||||
parts := strings.SplitN(listenAddr, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr)
|
||||
}
|
||||
proto, addr = parts[0], parts[1]
|
||||
|
||||
logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listenAddr, certFile, keyFile))
|
||||
listener, err = net.Listen(proto, addr)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := http.ServeTLS(
|
||||
listener,
|
||||
RecoverAndLogHandler(handler, logger),
|
||||
certFile,
|
||||
keyFile,
|
||||
)
|
||||
logger.Error("RPC HTTPS server stopped", "err", err)
|
||||
}()
|
||||
return listener, nil
|
||||
}
|
||||
|
@ -118,9 +118,8 @@ func NewRPCErrorResponse(id string, code int, msg string, data string) RPCRespon
|
||||
func (resp RPCResponse) String() string {
|
||||
if resp.Error == nil {
|
||||
return fmt.Sprintf("[%s %v]", resp.ID, resp.Result)
|
||||
} else {
|
||||
return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
|
||||
}
|
||||
return fmt.Sprintf("[%s %s]", resp.ID, resp.Error)
|
||||
}
|
||||
|
||||
func RPCParseError(id string, err error) RPCResponse {
|
||||
|
@ -1,35 +0,0 @@
|
||||
#! /bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
set +u
|
||||
if [[ "$DEP" == "" ]]; then
|
||||
DEP=$GOPATH/src/github.com/tendermint/tendermint/Gopkg.lock
|
||||
fi
|
||||
set -u
|
||||
|
||||
|
||||
set -u
|
||||
|
||||
function getVendoredVersion() {
|
||||
grep -A100 "$LIB" "$DEP" | grep revision | head -n1 | grep -o '"[^"]\+"' | cut -d '"' -f 2
|
||||
}
|
||||
|
||||
|
||||
# fetch and checkout vendored dep
|
||||
|
||||
lib=$1
|
||||
|
||||
echo "----------------------------------"
|
||||
echo "Getting $lib ..."
|
||||
go get -t "github.com/tendermint/$lib/..."
|
||||
|
||||
VENDORED=$(getVendoredVersion "$lib")
|
||||
cd "$GOPATH/src/github.com/tendermint/$lib" || exit
|
||||
MASTER=$(git rev-parse origin/master)
|
||||
|
||||
if [[ "$VENDORED" != "$MASTER" ]]; then
|
||||
echo "... VENDORED != MASTER ($VENDORED != $MASTER)"
|
||||
echo "... Checking out commit $VENDORED"
|
||||
git checkout "$VENDORED" &> /dev/null
|
||||
fi
|
@ -4,6 +4,7 @@
|
||||
Usage:
|
||||
wal2json <path-to-wal>
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
@ -219,7 +219,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
|
||||
// use the next pubkey
|
||||
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] {
|
||||
changeIndex++
|
||||
power += 1
|
||||
power++
|
||||
}
|
||||
header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power)
|
||||
state, err = updateState(state, blockID, header, responses)
|
||||
@ -237,7 +237,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
|
||||
// use the next pubkey (note our counter starts at 0 this time)
|
||||
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 {
|
||||
changeIndex++
|
||||
power += 1
|
||||
power++
|
||||
}
|
||||
testCases[i-1] = power
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@ -145,9 +146,8 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
|
||||
res, err := txi.Get(hash)
|
||||
if res == nil {
|
||||
return []*types.TxResult{}, nil
|
||||
} else {
|
||||
return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
|
||||
}
|
||||
return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result")
|
||||
}
|
||||
|
||||
// conditions to skip because they're handled before "everything else"
|
||||
@ -168,10 +168,10 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
|
||||
|
||||
for _, r := range ranges {
|
||||
if !hashesInitialized {
|
||||
hashes = txi.matchRange(r, startKeyForRange(r, height))
|
||||
hashes = txi.matchRange(r, []byte(r.key))
|
||||
hashesInitialized = true
|
||||
} else {
|
||||
hashes = intersect(hashes, txi.matchRange(r, startKeyForRange(r, height)))
|
||||
hashes = intersect(hashes, txi.matchRange(r, []byte(r.key)))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -200,6 +200,11 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
|
||||
i++
|
||||
}
|
||||
|
||||
// sort by height by default
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Height < results[j].Height
|
||||
})
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
@ -234,6 +239,52 @@ type queryRange struct {
|
||||
includeUpperBound bool
|
||||
}
|
||||
|
||||
func (r queryRange) lowerBoundValue() interface{} {
|
||||
if r.lowerBound == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.includeLowerBound {
|
||||
return r.lowerBound
|
||||
} else {
|
||||
switch t := r.lowerBound.(type) {
|
||||
case int64:
|
||||
return t + 1
|
||||
case time.Time:
|
||||
return t.Unix() + 1
|
||||
default:
|
||||
panic("not implemented")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r queryRange) AnyBound() interface{} {
|
||||
if r.lowerBound != nil {
|
||||
return r.lowerBound
|
||||
} else {
|
||||
return r.upperBound
|
||||
}
|
||||
}
|
||||
|
||||
func (r queryRange) upperBoundValue() interface{} {
|
||||
if r.upperBound == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.includeUpperBound {
|
||||
return r.upperBound
|
||||
} else {
|
||||
switch t := r.upperBound.(type) {
|
||||
case int64:
|
||||
return t - 1
|
||||
case time.Time:
|
||||
return t.Unix() - 1
|
||||
default:
|
||||
panic("not implemented")
|
||||
}
|
||||
}
|
||||
}
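
The two helpers above fold an exclusive bound into the nearest inclusive value so the range check can use plain comparisons. A simplified standalone sketch of the int64 case (not the indexer's actual types):

package main

import "fmt"

// inclusiveLower turns a possibly exclusive int64 lower bound into an
// inclusive one: "x > 5" behaves the same as "x >= 6".
func inclusiveLower(lower int64, includeLower bool) int64 {
	if includeLower {
		return lower
	}
	return lower + 1
}

func main() {
	fmt.Println(inclusiveLower(5, false)) // 6, from an exclusive bound
	fmt.Println(inclusiveLower(5, true))  // 5, already inclusive
}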
|
||||
|
||||
func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) {
|
||||
ranges = make(queryRanges)
|
||||
for i, c := range conditions {
|
||||
@ -297,34 +348,49 @@ func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte)
|
||||
return
|
||||
}
|
||||
|
||||
func (txi *TxIndex) matchRange(r queryRange, startKey []byte) (hashes [][]byte) {
|
||||
it := dbm.IteratePrefix(txi.store, startKey)
|
||||
func (txi *TxIndex) matchRange(r queryRange, prefix []byte) (hashes [][]byte) {
|
||||
// create a map to prevent duplicates
|
||||
hashesMap := make(map[string][]byte)
|
||||
|
||||
lowerBound := r.lowerBoundValue()
|
||||
upperBound := r.upperBoundValue()
|
||||
|
||||
it := dbm.IteratePrefix(txi.store, prefix)
|
||||
defer it.Close()
|
||||
LOOP:
|
||||
for ; it.Valid(); it.Next() {
|
||||
if !isTagKey(it.Key()) {
|
||||
continue
|
||||
}
|
||||
if r.upperBound != nil {
|
||||
// no other way to stop iterator other than checking for upperBound
|
||||
switch (r.upperBound).(type) {
|
||||
case int64:
|
||||
v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
|
||||
if err == nil && v == r.upperBound {
|
||||
if r.includeUpperBound {
|
||||
hashes = append(hashes, it.Value())
|
||||
}
|
||||
break LOOP
|
||||
}
|
||||
// XXX: passing time in a ABCI Tags is not yet implemented
|
||||
// case time.Time:
|
||||
// v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
|
||||
// if v == r.upperBound {
|
||||
// break
|
||||
// }
|
||||
switch r.AnyBound().(type) {
|
||||
case int64:
|
||||
v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
|
||||
if err != nil {
|
||||
continue LOOP
|
||||
}
|
||||
include := true
|
||||
if lowerBound != nil && v < lowerBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
if upperBound != nil && v > upperBound.(int64) {
|
||||
include = false
|
||||
}
|
||||
if include {
|
||||
hashesMap[fmt.Sprintf("%X", it.Value())] = it.Value()
|
||||
}
|
||||
// XXX: passing time in a ABCI Tags is not yet implemented
|
||||
// case time.Time:
|
||||
// v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64)
|
||||
// if v == r.upperBound {
|
||||
// break
|
||||
// }
|
||||
}
|
||||
hashes = append(hashes, it.Value())
|
||||
}
|
||||
hashes = make([][]byte, len(hashesMap))
|
||||
i := 0
|
||||
for _, h := range hashesMap {
|
||||
hashes[i] = h
|
||||
i++
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -342,33 +408,6 @@ func startKey(c query.Condition, height int64) []byte {
|
||||
return []byte(key)
|
||||
}
|
||||
|
||||
func startKeyForRange(r queryRange, height int64) []byte {
|
||||
if r.lowerBound == nil {
|
||||
return []byte(r.key)
|
||||
}
|
||||
|
||||
var lowerBound interface{}
|
||||
if r.includeLowerBound {
|
||||
lowerBound = r.lowerBound
|
||||
} else {
|
||||
switch t := r.lowerBound.(type) {
|
||||
case int64:
|
||||
lowerBound = t + 1
|
||||
case time.Time:
|
||||
lowerBound = t.Unix() + 1
|
||||
default:
|
||||
panic("not implemented")
|
||||
}
|
||||
}
|
||||
var key string
|
||||
if height > 0 {
|
||||
key = fmt.Sprintf("%s/%v/%d", r.key, lowerBound, height)
|
||||
} else {
|
||||
key = fmt.Sprintf("%s/%v", r.key, lowerBound)
|
||||
}
|
||||
return []byte(key)
|
||||
}
|
||||
|
||||
func isTagKey(key []byte) bool {
|
||||
return strings.Count(string(key), tagKeySeparator) == 3
|
||||
}
|
||||
|
@ -123,6 +123,35 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
|
||||
assert.Equal(t, []*types.TxResult{txResult}, results)
|
||||
}
|
||||
|
||||
func TestTxSearchMultipleTxs(t *testing.T) {
|
||||
allowedTags := []string{"account.number"}
|
||||
indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags))
|
||||
|
||||
// indexed first, but bigger height (to test the order of transactions)
|
||||
txResult := txResultWithTags([]cmn.KVPair{
|
||||
{Key: []byte("account.number"), Value: []byte("1")},
|
||||
})
|
||||
txResult.Tx = types.Tx("Bob's account")
|
||||
txResult.Height = 2
|
||||
err := indexer.Index(txResult)
|
||||
require.NoError(t, err)
|
||||
|
||||
// indexed second, but smaller height (to test the order of transactions)
|
||||
txResult2 := txResultWithTags([]cmn.KVPair{
|
||||
{Key: []byte("account.number"), Value: []byte("2")},
|
||||
})
|
||||
txResult2.Tx = types.Tx("Alice's account")
|
||||
txResult2.Height = 1
|
||||
err = indexer.Index(txResult2)
|
||||
require.NoError(t, err)
|
||||
|
||||
results, err := indexer.Search(query.MustParse("account.number >= 1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
require.Len(t, results, 2)
|
||||
assert.Equal(t, []*types.TxResult{txResult2, txResult}, results)
|
||||
}
|
||||
|
||||
func TestIndexAllTags(t *testing.T) {
|
||||
indexer := NewTxIndex(db.NewMemDB(), IndexAllTags())
|
||||
|
||||
@ -147,7 +176,7 @@ func TestIndexAllTags(t *testing.T) {
|
||||
|
||||
func txResultWithTags(tags []cmn.KVPair) *types.TxResult {
|
||||
tx := types.Tx("HELLO WORLD")
|
||||
return &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: tags}}
|
||||
return &types.TxResult{Height: 1, Index: 0, Tx: tx, Result: abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: tags, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}}
|
||||
}
|
||||
|
||||
func benchmarkTxIndex(txsCount int, b *testing.B) {
|
||||
@ -168,7 +197,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
|
||||
if err := batch.Add(txResult); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
txResult.Index += 1
|
||||
txResult.Index++
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
@ -19,7 +19,3 @@ and run the following tests in docker containers:
|
||||
- send a tx on each node and ensure the state root is updated on all of them
|
||||
- crash and restart nodes one at a time and ensure they can sync back up (via fastsync)
|
||||
- crash and restart all nodes at once and ensure they can sync back up
|
||||
|
||||
If on a `release-x.x.x` branch, we also run
|
||||
|
||||
- `go test` for all our dependency libs (test/test_libs.sh)
|
||||
|
@ -1,34 +0,0 @@
|
||||
#! /bin/bash
|
||||
set -ex
|
||||
|
||||
export PATH="$GOBIN:$PATH"
|
||||
|
||||
# Get the parent directory of where this script is.
|
||||
SOURCE="${BASH_SOURCE[0]}"
|
||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
||||
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
|
||||
|
||||
####################
|
||||
# libs we depend on
|
||||
####################
|
||||
|
||||
# All libs should define `make test` and `make get_vendor_deps`
|
||||
LIBS=(tmlibs go-wire go-crypto abci)
|
||||
for lib in "${LIBS[@]}"; do
|
||||
# checkout vendored version of lib
|
||||
bash scripts/dep_utils/checkout.sh "$lib"
|
||||
|
||||
echo "Testing $lib ..."
|
||||
cd "$GOPATH/src/github.com/tendermint/$lib"
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
make test
|
||||
if [[ "$?" != 0 ]]; then
|
||||
echo "FAIL"
|
||||
exit 1
|
||||
fi
|
||||
cd "$DIR"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "PASS"
|
@ -156,9 +156,8 @@ func (b *Block) StringIndented(indent string) string {
|
||||
func (b *Block) StringShort() string {
|
||||
if b == nil {
|
||||
return "nil-Block"
|
||||
} else {
|
||||
return fmt.Sprintf("Block#%v", b.Hash())
|
||||
}
|
||||
return fmt.Sprintf("Block#%v", b.Hash())
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@ -192,9 +191,11 @@ type Header struct {
|
||||
}
|
||||
|
||||
// Hash returns the hash of the header.
|
||||
// Returns nil if ValidatorHash is missing.
|
||||
// Returns nil if ValidatorHash is missing,
|
||||
// since a Header is not valid unless there is
|
||||
// a ValidatorsHash (corresponding to the validator set).
|
||||
func (h *Header) Hash() cmn.HexBytes {
|
||||
if len(h.ValidatorsHash) == 0 {
|
||||
if h == nil || len(h.ValidatorsHash) == 0 {
|
||||
return nil
|
||||
}
|
||||
return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
|
||||
@ -230,7 +231,7 @@ func (h *Header) StringIndented(indent string) string {
|
||||
%s Data: %v
|
||||
%s Validators: %v
|
||||
%s App: %v
|
||||
%s Conensus: %v
|
||||
%s Consensus: %v
|
||||
%s Results: %v
|
||||
%s Evidence: %v
|
||||
%s}#%v`,
|
||||
@ -428,6 +429,9 @@ type Data struct {
|
||||
|
||||
// Hash returns the hash of the data
|
||||
func (data *Data) Hash() cmn.HexBytes {
|
||||
if data == nil {
|
||||
return (Txs{}).Hash()
|
||||
}
|
||||
if data.hash == nil {
|
||||
data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs
|
||||
}
|
||||
|
@ -3,7 +3,9 @@ package types
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
@ -26,7 +28,7 @@ func TestValidateBlock(t *testing.T) {
|
||||
|
||||
// tamper with NumTxs
|
||||
block = MakeBlock(h, txs, commit)
|
||||
block.NumTxs += 1
|
||||
block.NumTxs++
|
||||
err = block.ValidateBasic()
|
||||
require.Error(t, err)
|
||||
|
||||
@ -72,3 +74,15 @@ func makeBlockID(hash string, partSetSize int, partSetHash string) BlockID {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var nilBytes []byte
|
||||
|
||||
func TestNilHeaderHashDoesntCrash(t *testing.T) {
|
||||
assert.Equal(t, []byte((*Header)(nil).Hash()), nilBytes)
|
||||
assert.Equal(t, []byte((new(Header)).Hash()), nilBytes)
|
||||
}
|
||||
|
||||
func TestNilDataHashDoesntCrash(t *testing.T) {
|
||||
assert.Equal(t, []byte((*Data)(nil).Hash()), nilBytes)
|
||||
assert.Equal(t, []byte(new(Data).Hash()), nilBytes)
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
|
||||
b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx)
|
||||
continue
|
||||
}
|
||||
tags[string(tag.Key)] = tag.Value
|
||||
tags[string(tag.Key)] = string(tag.Value)
|
||||
}
|
||||
|
||||
// add predefined tags
|
||||
|
@ -7,9 +7,58 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abci "github.com/tendermint/abci/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
tmpubsub "github.com/tendermint/tmlibs/pubsub"
|
||||
tmquery "github.com/tendermint/tmlibs/pubsub/query"
|
||||
)
|
||||
|
||||
func TestEventBusPublishEventTx(t *testing.T) {
|
||||
eventBus := NewEventBus()
|
||||
err := eventBus.Start()
|
||||
require.NoError(t, err)
|
||||
defer eventBus.Stop()
|
||||
|
||||
tx := Tx("foo")
|
||||
result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
|
||||
|
||||
txEventsCh := make(chan interface{})
|
||||
|
||||
// PublishEventTx adds all these 3 tags, so the query below should work
|
||||
query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X'", tx.Hash())
|
||||
err = eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query), txEventsCh)
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for e := range txEventsCh {
|
||||
edt := e.(TMEventData).Unwrap().(EventDataTx)
|
||||
assert.Equal(t, int64(1), edt.Height)
|
||||
assert.Equal(t, uint32(0), edt.Index)
|
||||
assert.Equal(t, tx, edt.Tx)
|
||||
assert.Equal(t, result, edt.Result)
|
||||
close(done)
|
||||
}
|
||||
}()
|
||||
|
||||
err = eventBus.PublishEventTx(EventDataTx{TxResult{
|
||||
Height: 1,
|
||||
Index: 0,
|
||||
Tx: tx,
|
||||
Result: result,
|
||||
}})
|
||||
assert.NoError(t, err)
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(1 * time.Second):
|
||||
t.Fatal("did not receive a transaction after 1 sec.")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEventBus(b *testing.B) {
|
||||
benchmarks := []struct {
|
||||
name string
|
||||
|
@ -99,7 +99,7 @@ func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
|
||||
|
||||
// BlockIDs must be different
|
||||
if dve.VoteA.BlockID.Equals(dve.VoteB.BlockID) {
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote!", dve.VoteA.BlockID)
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", dve.VoteA.BlockID)
|
||||
}
|
||||
|
||||
// Signatures must be valid
|
||||
|
@ -26,7 +26,17 @@ type GenesisDoc struct {
ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"`
Validators []GenesisValidator `json:"validators"`
AppHash cmn.HexBytes `json:"app_hash"`
AppState json.RawMessage `json:"app_state,omitempty"`
AppStateJSON json.RawMessage `json:"app_state,omitempty"`
AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED
}

// AppState returns raw application state.
// TODO: replace with AppState field during next breaking release (0.18)
func (genDoc *GenesisDoc) AppState() json.RawMessage {
if len(genDoc.AppOptions) > 0 {
return genDoc.AppOptions
}
return genDoc.AppStateJSON
}

// SaveAs is a utility method for saving GenensisDoc as a JSON file.
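A standalone sketch (not part of this diff) of the fallback behaviour of the AppState accessor above: a genesis file that still uses the deprecated app_options key keeps working, while app_state is used once present. The genesisDoc type below is a hypothetical stand-in that mirrors only the two JSON fields from the hunk:

package main

import (
	"encoding/json"
	"fmt"
)

type genesisDoc struct {
	AppStateJSON json.RawMessage `json:"app_state,omitempty"`
	AppOptions   json.RawMessage `json:"app_options,omitempty"` // DEPRECATED
}

// appState mirrors the accessor above: prefer the deprecated app_options
// if it is set, otherwise fall back to app_state.
func (g *genesisDoc) appState() json.RawMessage {
	if len(g.AppOptions) > 0 {
		return g.AppOptions
	}
	return g.AppStateJSON
}

func main() {
	var oldStyle, newStyle genesisDoc
	_ = json.Unmarshal([]byte(`{"app_options": {"account": "abc"}}`), &oldStyle)
	_ = json.Unmarshal([]byte(`{"app_state": {"account": "abc"}}`), &newStyle)

	fmt.Println(string(oldStyle.appState())) // {"account": "abc"} via deprecated app_options
	fmt.Println(string(newStyle.appState())) // {"account": "abc"} via app_state
}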
@ -30,12 +30,11 @@ type Part struct {
func (part *Part) Hash() []byte {
if part.hash != nil {
return part.hash
} else {
hasher := ripemd160.New()
hasher.Write(part.Bytes) // nolint: errcheck, gas
part.hash = hasher.Sum(nil)
return part.hash
}
hasher := ripemd160.New()
hasher.Write(part.Bytes) // nolint: errcheck, gas
part.hash = hasher.Sum(nil)
return part.hash
}

func (part *Part) String() string {
@ -129,20 +128,18 @@ func NewPartSetFromHeader(header PartSetHeader) *PartSet {
func (ps *PartSet) Header() PartSetHeader {
if ps == nil {
return PartSetHeader{}
} else {
return PartSetHeader{
Total: ps.total,
Hash: ps.hash,
}
}
return PartSetHeader{
Total: ps.total,
Hash: ps.hash,
}
}

func (ps *PartSet) HasHeader(header PartSetHeader) bool {
if ps == nil {
return false
} else {
return ps.Header().Equals(header)
}
return ps.Header().Equals(header)
}

func (ps *PartSet) BitArray() *cmn.BitArray {
@ -251,7 +248,7 @@ func (psr *PartSetReader) Read(p []byte) (n int, err error) {
return n1 + n2, err
}

psr.i += 1
psr.i++
if psr.i >= len(psr.parts) {
return 0, io.EOF
}
@ -262,9 +259,8 @@ func (psr *PartSetReader) Read(p []byte) (n int, err error) {
func (ps *PartSet) StringShort() string {
if ps == nil {
return "nil-PartSet"
} else {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total())
}
ps.mtx.Lock()
defer ps.mtx.Unlock()
return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total())
}
238
types/priv_validator/sign_info.go
Normal file
@ -0,0 +1,238 @@
package types

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"time"

crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)

// TODO: type ?
const (
stepNone int8 = 0 // Used to distinguish the initial state
stepPropose int8 = 1
stepPrevote int8 = 2
stepPrecommit int8 = 3
)

func voteToStep(vote *types.Vote) int8 {
switch vote.Type {
case types.VoteTypePrevote:
return stepPrevote
case types.VoteTypePrecommit:
return stepPrecommit
default:
panic("Unknown vote type")
}
}

//-------------------------------------

// LastSignedInfo contains information about the latest
// data signed by a validator to help prevent double signing.
type LastSignedInfo struct {
Height int64 `json:"height"`
Round int `json:"round"`
Step int8 `json:"step"`
Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
SignBytes cmn.HexBytes `json:"signbytes,omitempty"` // so we dont lose signatures
}

func NewLastSignedInfo() *LastSignedInfo {
return &LastSignedInfo{
Step: stepNone,
}
}

func (lsi *LastSignedInfo) String() string {
return fmt.Sprintf("LH:%v, LR:%v, LS:%v", lsi.Height, lsi.Round, lsi.Step)
}

// Verify returns an error if there is a height/round/step regression
// or if the HRS matches but there are no LastSignBytes.
// It returns true if HRS matches exactly and the LastSignature exists.
// It panics if the HRS matches, the LastSignBytes are not empty, but the LastSignature is empty.
func (lsi LastSignedInfo) Verify(height int64, round int, step int8) (bool, error) {
if lsi.Height > height {
return false, errors.New("Height regression")
}

if lsi.Height == height {
if lsi.Round > round {
return false, errors.New("Round regression")
}

if lsi.Round == round {
if lsi.Step > step {
return false, errors.New("Step regression")
} else if lsi.Step == step {
if lsi.SignBytes != nil {
if lsi.Signature.Empty() {
panic("info: LastSignature is nil but LastSignBytes is not!")
}
return true, nil
}
return false, errors.New("No LastSignature found")
}
}
}
return false, nil
}

// Set height/round/step and signature on the info
func (lsi *LastSignedInfo) Set(height int64, round int, step int8,
signBytes []byte, sig crypto.Signature) {

lsi.Height = height
lsi.Round = round
lsi.Step = step
lsi.Signature = sig
lsi.SignBytes = signBytes
}

// Reset resets all the values.
// XXX: Unsafe.
func (lsi *LastSignedInfo) Reset() {
lsi.Height = 0
lsi.Round = 0
lsi.Step = 0
lsi.Signature = crypto.Signature{}
lsi.SignBytes = nil
}

// SignVote checks the height/round/step (HRS) are greater than the latest state of the LastSignedInfo.
// If so, it signs the vote, updates the LastSignedInfo, and sets the signature on the vote.
// If the HRS are equal and the only thing changed is the timestamp, it sets the vote.Timestamp to the previous
// value and the Signature to the LastSignedInfo.Signature.
// Else it returns an error.
func (lsi *LastSignedInfo) SignVote(signer types.Signer, chainID string, vote *types.Vote) error {
height, round, step := vote.Height, vote.Round, voteToStep(vote)
signBytes := vote.SignBytes(chainID)

sameHRS, err := lsi.Verify(height, round, step)
if err != nil {
return err
}

// We might crash before writing to the wal,
// causing us to try to re-sign for the same HRS.
// If signbytes are the same, use the last signature.
// If they only differ by timestamp, use last timestamp and signature
// Otherwise, return error
if sameHRS {
if bytes.Equal(signBytes, lsi.SignBytes) {
vote.Signature = lsi.Signature
} else if timestamp, ok := checkVotesOnlyDifferByTimestamp(lsi.SignBytes, signBytes); ok {
vote.Timestamp = timestamp
vote.Signature = lsi.Signature
} else {
err = fmt.Errorf("Conflicting data")
}
return err
}
sig, err := signer.Sign(signBytes)
if err != nil {
return err
}
lsi.Set(height, round, step, signBytes, sig)
vote.Signature = sig
return nil
}

// SignProposal checks if the height/round/step (HRS) are greater than the latest state of the LastSignedInfo.
// If so, it signs the proposal, updates the LastSignedInfo, and sets the signature on the proposal.
// If the HRS are equal and the only thing changed is the timestamp, it sets the timestamp to the previous
// value and the Signature to the LastSignedInfo.Signature.
// Else it returns an error.
func (lsi *LastSignedInfo) SignProposal(signer types.Signer, chainID string, proposal *types.Proposal) error {
height, round, step := proposal.Height, proposal.Round, stepPropose
signBytes := proposal.SignBytes(chainID)

sameHRS, err := lsi.Verify(height, round, step)
if err != nil {
return err
}

// We might crash before writing to the wal,
// causing us to try to re-sign for the same HRS.
// If signbytes are the same, use the last signature.
// If they only differ by timestamp, use last timestamp and signature
// Otherwise, return error
if sameHRS {
if bytes.Equal(signBytes, lsi.SignBytes) {
proposal.Signature = lsi.Signature
} else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(lsi.SignBytes, signBytes); ok {
proposal.Timestamp = timestamp
proposal.Signature = lsi.Signature
} else {
err = fmt.Errorf("Conflicting data")
}
return err
}
sig, err := signer.Sign(signBytes)
if err != nil {
return err
}
lsi.Set(height, round, step, signBytes, sig)
proposal.Signature = sig
return nil
}

//-------------------------------------

// returns the timestamp from the lastSignBytes.
// returns true if the only difference in the votes is their timestamp.
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
var lastVote, newVote types.CanonicalJSONOnceVote
if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
}
if err := json.Unmarshal(newSignBytes, &newVote); err != nil {
panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
}

lastTime, err := time.Parse(types.TimeFormat, lastVote.Vote.Timestamp)
if err != nil {
panic(err)
}

// set the times to the same value and check equality
now := types.CanonicalTime(time.Now())
lastVote.Vote.Timestamp = now
newVote.Vote.Timestamp = now
lastVoteBytes, _ := json.Marshal(lastVote)
newVoteBytes, _ := json.Marshal(newVote)

return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
}

// returns the timestamp from the lastSignBytes.
// returns true if the only difference in the proposals is their timestamp
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
var lastProposal, newProposal types.CanonicalJSONOnceProposal
if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
}
if err := json.Unmarshal(newSignBytes, &newProposal); err != nil {
panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
}

lastTime, err := time.Parse(types.TimeFormat, lastProposal.Proposal.Timestamp)
if err != nil {
panic(err)
}

// set the times to the same value and check equality
now := types.CanonicalTime(time.Now())
lastProposal.Proposal.Timestamp = now
newProposal.Proposal.Timestamp = now
lastProposalBytes, _ := json.Marshal(lastProposal)
newProposalBytes, _ := json.Marshal(newProposal)

return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
}
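The Verify method in the new file above is the heart of the double-sign protection: it only reports "same HRS" when height, round and step all match exactly, and rejects any regression. A standalone sketch of that comparison, with the signature bookkeeping dropped for brevity (lsiVerify is a hypothetical helper, not part of the diff):

package main

import (
	"errors"
	"fmt"
)

// lsiVerify compares a new (height, round, step) against the last signed one.
// It returns (true, nil) only when all three match; any step backwards is an error.
func lsiVerify(lastH int64, lastR int, lastS int8, h int64, r int, s int8) (bool, error) {
	if lastH > h {
		return false, errors.New("Height regression")
	}
	if lastH == h {
		if lastR > r {
			return false, errors.New("Round regression")
		}
		if lastR == r {
			if lastS > s {
				return false, errors.New("Step regression")
			}
			if lastS == s {
				return true, nil // same HRS: only identical (or timestamp-only-different) bytes may be re-signed
			}
		}
	}
	return false, nil
}

func main() {
	// last signed: height 10, round 2, step 2 (prevote)
	fmt.Println(lsiVerify(10, 2, 2, 9, 0, 1))  // false Height regression
	fmt.Println(lsiVerify(10, 2, 2, 10, 2, 1)) // false Step regression
	fmt.Println(lsiVerify(10, 2, 2, 10, 2, 2)) // true <nil>
	fmt.Println(lsiVerify(10, 2, 2, 11, 0, 1)) // false <nil>: safe to sign fresh data
}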
@ -45,9 +45,10 @@ func (v *Validator) CompareAccum(other *Validator) *Validator {
} else if v.Accum < other.Accum {
return other
} else {
if bytes.Compare(v.Address, other.Address) < 0 {
result := bytes.Compare(v.Address, other.Address)
if result < 0 {
return v
} else if bytes.Compare(v.Address, other.Address) > 0 {
} else if result > 0 {
return other
} else {
cmn.PanicSanity("Cannot compare identical validators")
@ -82,27 +82,30 @@ func (valSet *ValidatorSet) Copy() *ValidatorSet {
}
}

// HasAddress returns true if address given is in the validator set, false -
// otherwise.
func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}

// GetByAddress returns an index of the validator with address and validator
// itself if found. Otherwise, -1 and nil are returned.
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
} else {
return 0, nil
}
return -1, nil
}
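GetByAddress now reports "not found" as -1 instead of 0, a breaking change, since 0 is a legitimate index of the first validator. A standalone sketch of the sorted-slice lookup pattern used above, with plain byte slices standing in for validators:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// indexOf returns the index of address in the sorted slice, or -1 if absent.
// It mirrors the sort.Search plus bytes.Equal pattern of GetByAddress above.
func indexOf(sorted [][]byte, address []byte) int {
	idx := sort.Search(len(sorted), func(i int) bool {
		return bytes.Compare(address, sorted[i]) <= 0
	})
	if idx < len(sorted) && bytes.Equal(sorted[idx], address) {
		return idx
	}
	return -1 // 0 would be ambiguous with the first element
}

func main() {
	addrs := [][]byte{[]byte("aa"), []byte("bb"), []byte("cc")} // already sorted
	fmt.Println(indexOf(addrs, []byte("aa"))) // 0
	fmt.Println(indexOf(addrs, []byte("zz"))) // -1
}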
// GetByIndex returns the validator by index.
// It returns nil values if index < 0 or
// index >= len(ValidatorSet.Validators)

// GetByIndex returns the validator's address and validator itself by index.
// It returns nil values if index is less than 0 or greater or equal to
// len(ValidatorSet.Validators).
func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
if index < 0 || index >= len(valSet.Validators) {
return nil, nil
@ -111,10 +114,12 @@ func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validato
return val.Address, val.Copy()
}

// Size returns the length of the validator set.
func (valSet *ValidatorSet) Size() int {
return len(valSet.Validators)
}

// TotalVotingPower returns the sum of the voting powers of all validators.
func (valSet *ValidatorSet) TotalVotingPower() int64 {
if valSet.totalVotingPower == 0 {
for _, val := range valSet.Validators {
@ -125,6 +130,8 @@ func (valSet *ValidatorSet) TotalVotingPower() int64 {
return valSet.totalVotingPower
}

// GetProposer returns the current proposer. If the validator set is empty, nil
// is returned.
func (valSet *ValidatorSet) GetProposer() (proposer *Validator) {
if len(valSet.Validators) == 0 {
return nil
@ -145,6 +152,8 @@ func (valSet *ValidatorSet) findProposer() *Validator {
return proposer
}

// Hash returns the Merkle root hash build using validators (as leaves) in the
// set.
func (valSet *ValidatorSet) Hash() []byte {
if len(valSet.Validators) == 0 {
return nil
@ -156,12 +165,14 @@ func (valSet *ValidatorSet) Hash() []byte {
return merkle.SimpleHashFromHashers(hashers)
}

// Add adds val to the validator set and returns true. It returns false if val
// is already in the set.
func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
val = val.Copy()
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) {
if idx >= len(valSet.Validators) {
valSet.Validators = append(valSet.Validators, val)
// Invalidate cache
valSet.Proposer = nil
@ -182,39 +193,42 @@ func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
}
}

// Update updates val and returns true. It returns false if val is not present
// in the set.
func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
index, sameVal := valSet.GetByAddress(val.Address)
if sameVal == nil {
return false
} else {
valSet.Validators[index] = val.Copy()
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
}
valSet.Validators[index] = val.Copy()
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
}

// Remove deletes the validator with address. It returns the validator removed
// and true. If returns nil and false if validator is not present in the set.
func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
} else {
removedVal := valSet.Validators[idx]
newValidators := valSet.Validators[:idx]
if idx+1 < len(valSet.Validators) {
newValidators = append(newValidators, valSet.Validators[idx+1:]...)
}
valSet.Validators = newValidators
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return removedVal, true
}
removedVal := valSet.Validators[idx]
newValidators := valSet.Validators[:idx]
if idx+1 < len(valSet.Validators) {
newValidators = append(newValidators, valSet.Validators[idx+1:]...)
}
valSet.Validators = newValidators
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return removedVal, true
}

// Iterate will run the given function over the set.
func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
for i, val := range valSet.Validators {
stop := fn(i, val.Copy())
@ -265,10 +279,9 @@ func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height

if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
return nil
} else {
return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
}
return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
}
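The commit check above passes only when the tallied power is strictly greater than two thirds of the total; with integer arithmetic the "needed" value in the error message works out to TotalVotingPower()*2/3 + 1. A small standalone illustration of that threshold (the numbers are made up):

package main

import "fmt"

func main() {
	total := int64(10)
	needed := total*2/3 + 1 // integer math: 10*2/3 == 6, so 7 is needed

	for _, tallied := range []int64{6, 7} {
		ok := tallied > total*2/3
		fmt.Printf("tallied=%d needed=%d commit ok=%v\n", tallied, needed, ok)
	}
	// prints:
	// tallied=6 needed=7 commit ok=false
	// tallied=7 needed=7 commit ok=true
}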

// VerifyCommitAny will check to see if the set would
@ -471,9 +484,8 @@ func safeMulClip(a, b int64) int64 {
if overflow {
if (a < 0 || b < 0) && !(a < 0 && b < 0) {
return math.MinInt64
} else {
return math.MaxInt64
}
return math.MaxInt64
}
return c
}
@ -483,9 +495,8 @@ func safeAddClip(a, b int64) int64 {
if overflow {
if b < 0 {
return math.MinInt64
} else {
return math.MaxInt64
}
return math.MaxInt64
}
return c
}
@ -495,9 +506,8 @@ func safeSubClip(a, b int64) int64 {
if overflow {
if b > 0 {
return math.MinInt64
} else {
return math.MaxInt64
}
return math.MaxInt64
}
return c
}
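The three helpers above clamp to math.MinInt64 or math.MaxInt64 instead of wrapping around when the underlying operation overflows. A standalone sketch of the pattern for addition; safeAdd below is a hypothetical overflow-detecting helper written out here, since the hunks only show the clipping wrappers:

package main

import (
	"fmt"
	"math"
)

// safeAdd reports whether a+b overflows int64; on overflow the sum is not meaningful.
func safeAdd(a, b int64) (int64, bool) {
	if b > 0 && a > math.MaxInt64-b {
		return 0, true
	}
	if b < 0 && a < math.MinInt64-b {
		return 0, true
	}
	return a + b, false
}

// safeAddClip mirrors the wrapper above: clamp instead of wrapping around.
func safeAddClip(a, b int64) int64 {
	c, overflow := safeAdd(a, b)
	if overflow {
		if b < 0 {
			return math.MinInt64
		}
		return math.MaxInt64
	}
	return c
}

func main() {
	fmt.Println(safeAddClip(math.MaxInt64, 1))  // clamps to MaxInt64
	fmt.Println(safeAddClip(math.MinInt64, -1)) // clamps to MinInt64
	fmt.Println(safeAddClip(40, 2))             // 42
}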
@ -126,7 +126,7 @@ func TestProposerSelection2(t *testing.T) {
for i := 0; i < 120*N; i++ {
prop := vals.GetProposer()
ii := prop.Address[19]
propCount[ii] += 1
propCount[ii]++
vals.IncrementAccum(1)
}
@ -94,33 +94,29 @@ func (voteSet *VoteSet) ChainID() string {
func (voteSet *VoteSet) Height() int64 {
if voteSet == nil {
return 0
} else {
return voteSet.height
}
return voteSet.height
}

func (voteSet *VoteSet) Round() int {
if voteSet == nil {
return -1
} else {
return voteSet.round
}
return voteSet.round
}

func (voteSet *VoteSet) Type() byte {
if voteSet == nil {
return 0x00
} else {
return voteSet.type_
}
return voteSet.type_
}

func (voteSet *VoteSet) Size() int {
if voteSet == nil {
return 0
} else {
return voteSet.valSet.Size()
}
return voteSet.valSet.Size()
}

// Returns added=true if vote is valid and new.
@ -185,9 +181,8 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
if existing, ok := voteSet.getVote(valIndex, blockKey); ok {
if existing.Signature.Equals(vote.Signature) {
return false, nil // duplicate
} else {
return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote)
}
return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote)
}

// Check signature.
@ -199,13 +194,11 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower)
if conflicting != nil {
return added, NewConflictingVoteError(val, conflicting, vote)
} else {
if !added {
cmn.PanicSanity("Expected to add non-conflicting vote")
}
return added, nil
}

if !added {
cmn.PanicSanity("Expected to add non-conflicting vote")
}
return added, nil
}

// Returns (vote, true) if vote exists for valIndex and blockKey
@ -257,13 +250,12 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower
// ... and there's a conflicting vote.
// We're not even tracking this blockKey, so just forget it.
return false, conflicting
} else {
// ... and there's no conflicting vote.
// Start tracking this blockKey
votesByBlock = newBlockVotes(false, voteSet.valSet.Size())
voteSet.votesByBlock[blockKey] = votesByBlock
// We'll add the vote in a bit.
}
// ... and there's no conflicting vote.
// Start tracking this blockKey
votesByBlock = newBlockVotes(false, voteSet.valSet.Size())
voteSet.votesByBlock[blockKey] = votesByBlock
// We'll add the vote in a bit.
}

// Before adding to votesByBlock, see if we'll exceed quorum
@ -309,10 +301,9 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error {
if existing, ok := voteSet.peerMaj23s[peerID]; ok {
if existing.Equals(blockID) {
return nil // Nothing to do
} else {
return fmt.Errorf("SetPeerMaj23: Received conflicting blockID from peer %v. Got %v, expected %v",
peerID, blockID, existing)
}
return fmt.Errorf("SetPeerMaj23: Received conflicting blockID from peer %v. Got %v, expected %v",
peerID, blockID, existing)
}
voteSet.peerMaj23s[peerID] = blockID

@ -321,10 +312,9 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error {
if ok {
if votesByBlock.peerMaj23 {
return nil // Nothing to do
} else {
votesByBlock.peerMaj23 = true
// No need to copy votes, already there.
}
votesByBlock.peerMaj23 = true
// No need to copy votes, already there.
} else {
votesByBlock = newBlockVotes(true, voteSet.valSet.Size())
voteSet.votesByBlock[blockKey] = votesByBlock
@ -422,9 +412,8 @@ func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) {
defer voteSet.mtx.Unlock()
if voteSet.maj23 != nil {
return *voteSet.maj23, true
} else {
return BlockID{}, false
}
return BlockID{}, false
}

func (voteSet *VoteSet) String() string {
@ -7,7 +7,7 @@ const Fix = "0"
var (
// Version is the current version of Tendermint
// Must be a string because scripts like dist.sh read this file.
Version = "0.19.0"
Version = "0.19.0-dev"

// GitCommit is the current HEAD set using ldflags.
GitCommit string