Mirror of https://github.com/fluencelabs/tendermint, synced 2025-06-22 17:31:34 +00:00

Merge branch 'p2p-consolidate' into p2p-id

CHANGELOG.md (10)
@ -5,6 +5,7 @@

BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence
- the files usually found in `~/.tendermint` (`config.toml`, `genesis.json`, and `priv_validator.json`) are now in `~/.tendermint/config`. The `$TMHOME/data/` directory remains unchanged.
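Concretely, the on-disk layout after `tendermint init` under the new scheme looks roughly like this (assuming the default `$TMHOME` of `~/.tendermint`):

```
$ ls $HOME/.tendermint
config  data
$ ls $HOME/.tendermint/config
config.toml  genesis.json  priv_validator.json
```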
FEATURES:
- Peer reputation management

@ -25,6 +26,15 @@ BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety or liveness

## 0.16.0 (TBD)

BREAKING CHANGES:
- [p2p] the old `seeds` option is now `persistent_peers` (persistent peers to which TM will always connect)
- [p2p] `seeds` is now only used for getting addresses (if the addrbook is empty); these connections are not persistent
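To make the renaming concrete, a minimal `[p2p]` section under the new option names might look like this (illustrative addresses):

```toml
[p2p]
# only used to discover addresses when the address book is empty; not persistent
seeds = "203.0.113.10:46656"

# peers Tendermint will always (re)connect to
persistent_peers = "203.0.113.20:46656,203.0.113.21:46656"
```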
FEATURES:
- [p2p] added new `/dial_persistent_peers` **unsafe** endpoint

## 0.15.0 (December 29, 2017)

BREAKING CHANGES:
DOCKER/Dockerfile

@ -1,8 +1,8 @@
FROM alpine:3.6

# This is the release of tendermint to pull in.
ENV TM_VERSION 0.13.0
ENV TM_SHA256SUM 36d773d4c2890addc61cc87a72c1e9c21c89516921b0defb0edfebde719b4b85
ENV TM_VERSION 0.15.0
ENV TM_SHA256SUM 71cc271c67eca506ca492c8b90b090132f104bf5dbfe0af2702a50886e88de17

# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private
DOCKER/README.md

@ -1,6 +1,7 @@
# Supported tags and respective `Dockerfile` links

- `0.13.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
- `0.15.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)
- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile)
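As a usage sketch for the bumped release (assuming the published image name `tendermint/tendermint` and that the image's entrypoint is the `tendermint` binary):

```
docker run -it --rm -v "/tmp/tmdata:/tendermint" tendermint/tendermint:0.15.0 init
docker run -it --rm -v "/tmp/tmdata:/tendermint" tendermint/tendermint:0.15.0 node --proxy_app=dummy
```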
Vagrantfile (vendored, 2)
@ -44,6 +44,6 @@ EOF
chown ubuntu:ubuntu /home/ubuntu/.bash_profile

# get all deps and tools, ready to install/test
su - ubuntu -c 'cd /home/ubuntu/go/src/github.com/tendermint/tendermint && make get_vendor_deps && make tools'
su - ubuntu -c 'cd /home/ubuntu/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
SHELL
end
@ -51,7 +51,7 @@ tendermint node \
--proxy_app dummy \
--p2p.laddr tcp://127.0.0.1:56666 \
--rpc.laddr tcp://127.0.0.1:56667 \
--p2p.seeds 127.0.0.1:56656 \
--p2p.persistent_peers 127.0.0.1:56656 \
--log_level error &

# wait for node to start up so we only count time where we are actually syncing
@ -29,6 +29,7 @@ func AddNodeFlags(cmd *cobra.Command) {
// p2p flags
cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma delimited host:port persistent peers")
cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange")
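For example, the new flag combines with the existing ones when starting a node (illustrative addresses):

```
tendermint node --proxy_app=dummy \
  --p2p.laddr tcp://0.0.0.0:46656 \
  --p2p.seeds 203.0.113.10:46656 \
  --p2p.persistent_peers 203.0.113.20:46656,203.0.113.21:46656
```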
@ -2,11 +2,12 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
@ -35,6 +36,7 @@ var TestnetFilesCmd = &cobra.Command{
|
||||
func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
|
||||
genVals := make([]types.GenesisValidator, nValidators)
|
||||
defaultConfig := cfg.DefaultBaseConfig()
|
||||
|
||||
// Initialize core dir and priv_validator.json's
|
||||
for i := 0; i < nValidators; i++ {
|
||||
@ -44,7 +46,7 @@ func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
cmn.Exit(err.Error())
|
||||
}
|
||||
// Read priv_validator.json to populate vals
|
||||
privValFile := path.Join(dataDir, mach, "priv_validator.json")
|
||||
privValFile := filepath.Join(dataDir, mach, defaultConfig.PrivValidator)
|
||||
privVal := types.LoadPrivValidatorFS(privValFile)
|
||||
genVals[i] = types.GenesisValidator{
|
||||
PubKey: privVal.GetPubKey(),
|
||||
@ -63,7 +65,7 @@ func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
// Write genesis file.
|
||||
for i := 0; i < nValidators; i++ {
|
||||
mach := cmn.Fmt("mach%d", i)
|
||||
if err := genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json")); err != nil {
|
||||
if err := genDoc.SaveAs(filepath.Join(dataDir, mach, defaultConfig.Genesis)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@ -73,14 +75,15 @@ func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
|
||||
// Initialize per-machine core directory
|
||||
func initMachCoreDirectory(base, mach string) error {
|
||||
dir := path.Join(base, mach)
|
||||
dir := filepath.Join(base, mach)
|
||||
err := cmn.EnsureDir(dir, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create priv_validator.json file if not present
|
||||
ensurePrivValidator(path.Join(dir, "priv_validator.json"))
|
||||
defaultConfig := cfg.DefaultBaseConfig()
|
||||
ensurePrivValidator(filepath.Join(dir, defaultConfig.PrivValidator))
|
||||
return nil
|
||||
|
||||
}
|
||||
|
@ -2,10 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/tendermint/tmlibs/cli"
|
||||
|
||||
cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
nm "github.com/tendermint/tendermint/node"
|
||||
)
|
||||
|
||||
@ -37,7 +39,7 @@ func main() {
|
||||
// Create & start node
|
||||
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
|
||||
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir)))
|
||||
if err := cmd.Execute(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -7,6 +7,27 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Note: Most of the structs & relevant comments + the
|
||||
// default configuration options were used to manually
|
||||
// generate the config.toml. Please reflect any changes
|
||||
// made here in the defaultConfigTemplate constant in
|
||||
// config/toml.go
|
||||
var (
|
||||
DefaultTendermintDir = ".tendermint"
|
||||
defaultConfigDir = "config"
|
||||
defaultDataDir = "data"
|
||||
|
||||
defaultConfigFileName = "config.toml"
|
||||
defaultGenesisJSONName = "genesis.json"
|
||||
defaultPrivValName = "priv_validator.json"
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValPath = filepath.Join(defaultConfigDir, defaultPrivValName)
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
type Config struct {
|
||||
// Top level options use an anonymous struct
|
||||
@ -59,17 +80,18 @@ func (cfg *Config) SetRoot(root string) *Config {
|
||||
|
||||
// BaseConfig defines the base configuration for a Tendermint node
|
||||
type BaseConfig struct {
|
||||
|
||||
// chainID is unexposed and immutable but here for convenience
|
||||
chainID string
|
||||
|
||||
// The root directory for all data.
|
||||
// This should be set in viper so it can unmarshal into this struct
|
||||
RootDir string `mapstructure:"home"`
|
||||
|
||||
// The ID of the chain to join (should be signed with every transaction and vote)
|
||||
ChainID string `mapstructure:"chain_id"`
|
||||
|
||||
// A JSON file containing the initial validator set and other meta data
|
||||
// Path to the JSON file containing the initial validator set and other meta data
|
||||
Genesis string `mapstructure:"genesis_file"`
|
||||
|
||||
// A JSON file containing the private key to use as a validator in the consensus protocol
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
PrivValidator string `mapstructure:"priv_validator_file"`
|
||||
|
||||
// A JSON file containing the private key to use for p2p authenticated encryption
|
||||
@ -107,12 +129,16 @@ type BaseConfig struct {
|
||||
DBPath string `mapstructure:"db_dir"`
|
||||
}
|
||||
|
||||
func (c BaseConfig) ChainID() string {
|
||||
return c.chainID
|
||||
}
|
||||
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
func DefaultBaseConfig() BaseConfig {
|
||||
return BaseConfig{
|
||||
Genesis: "genesis.json",
|
||||
PrivValidator: "priv_validator.json",
|
||||
NodeKey: "node_key.json",
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
PrivValidator: defaultPrivValPath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:46658",
|
||||
ABCI: "socket",
|
||||
@ -128,7 +154,7 @@ func DefaultBaseConfig() BaseConfig {
|
||||
// TestBaseConfig returns a base configuration for testing a Tendermint node
|
||||
func TestBaseConfig() BaseConfig {
|
||||
conf := DefaultBaseConfig()
|
||||
conf.ChainID = "tendermint_test"
|
||||
conf.chainID = "tendermint_test"
|
||||
conf.ProxyApp = "dummy"
|
||||
conf.FastSync = false
|
||||
conf.DBBackend = "memdb"
|
||||
@ -179,7 +205,7 @@ type RPCConfig struct {
|
||||
// NOTE: This server only supports /broadcast_tx_commit
|
||||
GRPCListenAddress string `mapstructure:"grpc_laddr"`
|
||||
|
||||
// Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
// Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool
|
||||
Unsafe bool `mapstructure:"unsafe"`
|
||||
}
|
||||
|
||||
@ -212,8 +238,13 @@ type P2PConfig struct {
|
||||
ListenAddress string `mapstructure:"laddr"`
|
||||
|
||||
// Comma separated list of seed nodes to connect to
|
||||
// We only use these if we can’t connect to peers in the addrbook
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
|
||||
// Comma separated list of persistent peers to connect to
|
||||
// We always connect to these
|
||||
PersistentPeers string `mapstructure:"persistent_peers"`
|
||||
|
||||
// Skip UPNP port forwarding
|
||||
SkipUPNP bool `mapstructure:"skip_upnp"`
|
||||
|
||||
@ -288,7 +319,7 @@ func DefaultMempoolConfig() *MempoolConfig {
|
||||
Recheck: true,
|
||||
RecheckEmpty: true,
|
||||
Broadcast: true,
|
||||
WalPath: "data/mempool.wal",
|
||||
WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
|
||||
}
|
||||
}
|
||||
|
||||
@ -308,7 +339,7 @@ type ConsensusConfig struct {
|
||||
WalLight bool `mapstructure:"wal_light"`
|
||||
walFile string // overrides WalPath if set
|
||||
|
||||
// All timeouts are in ms
|
||||
// All timeouts are in milliseconds
|
||||
TimeoutPropose int `mapstructure:"timeout_propose"`
|
||||
TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"`
|
||||
TimeoutPrevote int `mapstructure:"timeout_prevote"`
|
||||
@ -328,7 +359,7 @@ type ConsensusConfig struct {
|
||||
CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
|
||||
CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"`
|
||||
|
||||
// Reactor sleep duration parameters are in ms
|
||||
// Reactor sleep duration parameters are in milliseconds
|
||||
PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"`
|
||||
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
|
||||
}
|
||||
@ -376,7 +407,7 @@ func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
func DefaultConsensusConfig() *ConsensusConfig {
|
||||
return &ConsensusConfig{
|
||||
WalPath: "data/cs.wal/wal",
|
||||
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
|
||||
WalLight: false,
|
||||
TimeoutPropose: 3000,
|
||||
TimeoutProposeDelta: 500,
|
||||
|
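Taken together, the path changes above mean the standard files now resolve under `$TMHOME/config`. A minimal sketch of how this looks from calling code, assuming the package's exported `DefaultConfig`, `SetRoot`, `GenesisFile`, and `PrivValidatorFile` helpers:

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Build the default config and root it at a home directory.
	c := cfg.DefaultConfig()
	c.SetRoot("/home/user/.tendermint")

	// With the defaults above, the standard files resolve into config/.
	fmt.Println(c.GenesisFile())       // /home/user/.tendermint/config/genesis.json
	fmt.Println(c.PrivValidatorFile()) // /home/user/.tendermint/config/priv_validator.json
}
```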
config/toml.go (230)
@ -1,52 +1,210 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
var configTemplate *template.Template
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
if configTemplate, err = template.New("configFileTemplate").Parse(defaultConfigTemplate); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
/****** these are for production settings ***********/
|
||||
|
||||
func EnsureRoot(rootDir string) {
|
||||
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
|
||||
configFilePath := path.Join(rootDir, "config.toml")
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
|
||||
// Write default config file if missing.
|
||||
if !cmn.FileExists(configFilePath) {
|
||||
cmn.MustWriteFile(configFilePath, []byte(defaultConfig(defaultMoniker)), 0644)
|
||||
writeConfigFile(configFilePath)
|
||||
}
|
||||
}
|
||||
|
||||
var defaultConfigTmpl = `# This is a TOML config file.
|
||||
// XXX: this func should probably be called by cmd/tendermint/commands/init.go
|
||||
// alongside the writing of the genesis.json and priv_validator.json
|
||||
func writeConfigFile(configFilePath string) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if err := configTemplate.Execute(&buffer, DefaultConfig()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cmn.MustWriteFile(configFilePath, buffer.Bytes(), 0644)
|
||||
}
|
||||
|
||||
// Note: any changes to the comments/variables/mapstructure
|
||||
// must be reflected in the appropriate struct in config/config.go
|
||||
const defaultConfigTemplate = `# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "__MONIKER__"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "{{ .BaseConfig.ProxyApp }}"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "{{ .BaseConfig.Moniker }}"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = {{ .BaseConfig.FastSync }}
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "{{ .BaseConfig.DBBackend }}"
|
||||
|
||||
# Database directory
|
||||
db_path = "{{ .BaseConfig.DBPath }}"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "{{ .BaseConfig.LogLevel }}"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "{{ .BaseConfig.Genesis }}"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "{{ .BaseConfig.PrivValidator }}"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "{{ .BaseConfig.ABCI }}"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = "{{ .BaseConfig.ProfListenAddress }}"
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = {{ .BaseConfig.FilterPeers }}
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "{{ .RPC.ListenAddress }}"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = {{ .RPC.Unsafe }}
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
||||
`
|
||||
|
||||
func defaultConfig(moniker string) string {
|
||||
return strings.Replace(defaultConfigTmpl, "__MONIKER__", moniker, -1)
|
||||
}
|
||||
# Address to listen for incoming connections
|
||||
laddr = "{{ .P2P.ListenAddress }}"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "{{ .P2P.AddrBook }}"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = {{ .P2P.AddrBookStrict }}
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = {{ .P2P.MaxNumPeers }}
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_msg_packet_payload_size = {{ .P2P.MaxMsgPacketPayloadSize }}
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = {{ .P2P.SendRate }}
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = {{ .P2P.RecvRate }}
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = {{ .Mempool.Recheck }}
|
||||
recheck_empty = {{ .Mempool.RecheckEmpty }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
wal_dir = "{{ .Mempool.WalPath }}"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "{{ .Consensus.WalPath }}"
|
||||
wal_light = {{ .Consensus.WalLight }}
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = {{ .Consensus.TimeoutPropose }}
|
||||
timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }}
|
||||
timeout_prevote = {{ .Consensus.TimeoutPrevote }}
|
||||
timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }}
|
||||
timeout_precommit = {{ .Consensus.TimeoutPrecommit }}
|
||||
timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }}
|
||||
timeout_commit = {{ .Consensus.TimeoutCommit }}
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }}
|
||||
max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }}
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
|
||||
create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }}
|
||||
peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "{{ .TxIndex.Indexer }}"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This is, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = "{{ .TxIndex.IndexTags }}"
|
||||
|
||||
# When set to true, tells indexer to index all tags. Note this may be not
|
||||
# desirable (see the comment above). IndexTags has a precedence over
|
||||
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = {{ .TxIndex.IndexAllTags }}
|
||||
`
|
||||
|
||||
/****** these are for test settings ***********/
|
||||
|
||||
@ -69,17 +227,21 @@ func ResetTestRoot(testName string) *Config {
|
||||
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
|
||||
configFilePath := path.Join(rootDir, "config.toml")
|
||||
genesisFilePath := path.Join(rootDir, "genesis.json")
|
||||
privFilePath := path.Join(rootDir, "priv_validator.json")
|
||||
baseConfig := DefaultBaseConfig()
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
|
||||
privFilePath := filepath.Join(rootDir, baseConfig.PrivValidator)
|
||||
|
||||
// Write default config file if missing.
|
||||
if !cmn.FileExists(configFilePath) {
|
||||
cmn.MustWriteFile(configFilePath, []byte(testConfig(defaultMoniker)), 0644)
|
||||
writeConfigFile(configFilePath)
|
||||
}
|
||||
if !cmn.FileExists(genesisFilePath) {
|
||||
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
|
||||
@ -91,28 +253,6 @@ func ResetTestRoot(testName string) *Config {
|
||||
return config
|
||||
}
|
||||
|
||||
var testConfigTmpl = `# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "dummy"
|
||||
moniker = "__MONIKER__"
|
||||
fast_sync = false
|
||||
db_backend = "memdb"
|
||||
log_level = "info"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:36657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:36656"
|
||||
seeds = ""
|
||||
`
|
||||
|
||||
func testConfig(moniker string) (testConfig string) {
|
||||
testConfig = strings.Replace(testConfigTmpl, "__MONIKER__", moniker, -1)
|
||||
return
|
||||
}
|
||||
|
||||
var testGenesis = `{
|
||||
"genesis_time": "0001-01-01T00:00:00.000Z",
|
||||
"chain_id": "tendermint_test",
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@ -19,7 +20,7 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) {
|
||||
}
|
||||
|
||||
func TestEnsureRoot(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
// setup temp dir for test
|
||||
tmpDir, err := ioutil.TempDir("", "config-test")
|
||||
@ -30,15 +31,18 @@ func TestEnsureRoot(t *testing.T) {
|
||||
EnsureRoot(tmpDir)
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(tmpDir, "config.toml"))
|
||||
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
assert.Equal([]byte(defaultConfig(defaultMoniker)), data)
|
||||
|
||||
if !checkConfig(string(data)) {
|
||||
t.Fatalf("config file missing some information")
|
||||
}
|
||||
|
||||
ensureFiles(t, tmpDir, "data")
|
||||
}
|
||||
|
||||
func TestEnsureTestRoot(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
require := require.New(t)
|
||||
|
||||
testName := "ensureTestRoot"
|
||||
|
||||
@ -47,11 +51,44 @@ func TestEnsureTestRoot(t *testing.T) {
|
||||
rootDir := cfg.RootDir
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(rootDir, "config.toml"))
|
||||
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
assert.Equal([]byte(testConfig(defaultMoniker)), data)
|
||||
|
||||
if !checkConfig(string(data)) {
|
||||
t.Fatalf("config file missing some information")
|
||||
}
|
||||
|
||||
// TODO: make sure the cfg returned and testconfig are the same!
|
||||
|
||||
ensureFiles(t, rootDir, "data", "genesis.json", "priv_validator.json")
|
||||
baseConfig := DefaultBaseConfig()
|
||||
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidator)
|
||||
}
|
||||
|
||||
func checkConfig(configFile string) bool {
|
||||
var valid bool
|
||||
|
||||
// list of words we expect in the config
|
||||
var elems = []string{
|
||||
"moniker",
|
||||
"seeds",
|
||||
"proxy_app",
|
||||
"fast_sync",
|
||||
"create_empty_blocks",
|
||||
"peer",
|
||||
"timeout",
|
||||
"broadcast",
|
||||
"send",
|
||||
"addr",
|
||||
"wal",
|
||||
"propose",
|
||||
"max",
|
||||
"genesis",
|
||||
}
|
||||
	for _, e := range elems {
		if !strings.Contains(configFile, e) {
			// fail on the first missing element instead of letting a later
			// match overwrite the result
			valid = false
			break
		}
		valid = true
	}
	return valid
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartS
|
||||
Type: voteType,
|
||||
BlockID: types.BlockID{hash, header},
|
||||
}
|
||||
err := vs.PrivValidator.SignVote(config.ChainID, vote)
|
||||
err := vs.PrivValidator.SignVote(config.ChainID(), vote)
|
||||
return vote, err
|
||||
}
|
||||
|
||||
@ -129,7 +129,7 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round
|
||||
// Make proposal
|
||||
polRound, polBlockID := cs1.Votes.POLInfo()
|
||||
proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
|
||||
if err := vs.SignProposal(config.ChainID, proposal); err != nil {
|
||||
if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
@ -426,9 +426,10 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: time.Now(),
|
||||
ChainID: config.ChainID,
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ func TestBadProposal(t *testing.T) {
|
||||
propBlock.AppHash = stateHash
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
|
||||
if err := vs2.SignProposal(config.ChainID, proposal); err != nil {
|
||||
if err := vs2.SignProposal(config.ChainID(), proposal); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
|
||||
@ -900,7 +900,7 @@ func TestLockPOLSafety2(t *testing.T) {
|
||||
|
||||
// in round 2 we see the polkad block from round 0
|
||||
newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
|
||||
if err := vs3.SignProposal(config.ChainID, newProp); err != nil {
|
||||
if err := vs3.SignProposal(config.ChainID(), newProp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil {
|
||||
|
@ -18,7 +18,7 @@ func init() {
|
||||
func TestPeerCatchupRounds(t *testing.T) {
|
||||
valSet, privVals := types.RandValidatorSet(10, 1)
|
||||
|
||||
hvs := NewHeightVoteSet(config.ChainID, 1, valSet)
|
||||
hvs := NewHeightVoteSet(config.ChainID(), 1, valSet)
|
||||
|
||||
vote999_0 := makeVoteHR(t, 1, 999, privVals, 0)
|
||||
added, err := hvs.AddVote(vote999_0, "peer1")
|
||||
@ -59,7 +59,7 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivVal
|
||||
Type: types.VoteTypePrecommit,
|
||||
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
|
||||
}
|
||||
chainID := config.ChainID
|
||||
chainID := config.ChainID()
|
||||
err := privVal.SignVote(chainID, vote)
|
||||
if err != nil {
|
||||
panic(cmn.Fmt("Error signing vote: %v", err))
|
||||
|
@ -66,14 +66,56 @@ The most important messages are ``deliver_tx``, ``check_tx``, and
|
||||
``commit``, but there are others for convenience, configuration, and
|
||||
information purposes.
|
||||
|
||||
Let's start a dummy application, which was installed at the same time as
|
||||
``abci-cli`` above. The dummy just stores transactions in a merkle tree:
|
||||
We'll start a dummy application, which was installed at the same time as
|
||||
``abci-cli`` above. The dummy just stores transactions in a merkle tree.
|
||||
|
||||
Its code can be found `here <https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go>`__ and looks like:
|
||||
|
||||
.. container:: toggle
|
||||
|
||||
.. container:: header
|
||||
|
||||
**Show/Hide Dummy Example**
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func cmdDummy(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
app = dummy.NewDummyApplication()
|
||||
} else {
|
||||
app = dummy.NewPersistentDummyApplication(flagPersist)
|
||||
app.(*dummy.PersistentDummyApplication).SetLogger(logger.With("module", "dummy"))
|
||||
}
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddrD, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait forever
|
||||
cmn.TrapSignal(func() {
|
||||
// Cleanup
|
||||
srv.Stop()
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
Start by running:
|
||||
|
||||
::
|
||||
|
||||
abci-cli dummy
|
||||
|
||||
In another terminal, run
|
||||
And in another terminal, run
|
||||
|
||||
::
|
||||
|
||||
@ -187,6 +229,41 @@ Counter - Another Example
|
||||
Now that we've got the hang of it, let's try another application, the
|
||||
"counter" app.
|
||||
|
||||
Like the dummy app, its code can be found `here <https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go>`__ and looks like:
|
||||
|
||||
.. container:: toggle
|
||||
|
||||
.. container:: header
|
||||
|
||||
**Show/Hide Counter Example**
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
|
||||
app := counter.NewCounterApplication(flagSerial)
|
||||
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddrC, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait forever
|
||||
cmn.TrapSignal(func() {
|
||||
// Cleanup
|
||||
srv.Stop()
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
The counter app doesn't use a Merkle tree, it just counts how many times
|
||||
we've sent a transaction, asked for a hash, or committed the state. The
|
||||
result of ``commit`` is just the number of transactions sent.
|
||||
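As a rough interactive sketch (assuming the ``abci-cli console`` workflow described earlier in this document; exact prompts and output vary by version):

::

    abci-cli counter

    # in another terminal
    abci-cli console
    > deliver_tx 0x00
    > deliver_tx 0x01
    > commit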
@ -261,7 +338,7 @@ But the ultimate flexibility comes from being able to write the
|
||||
application easily in any language.
|
||||
|
||||
We have implemented the counter in a number of languages (see the
|
||||
example directory).
|
||||
`example directory <https://github.com/tendermint/abci/tree/master/example>`__).
|
||||
|
||||
To run the Node JS version, ``cd`` to ``example/js`` and run
|
||||
|
||||
@ -289,4 +366,4 @@ its own pattern of messages.
|
||||
For more information, see the `application developers
|
||||
guide <./app-development.html>`__. For examples of running an ABCI
|
||||
app with Tendermint, see the `getting started
|
||||
guide <./getting-started.html>`__.
|
||||
guide <./getting-started.html>`__. Next is the ABCI specification.
|
||||
|
docs/conf.py (45)
@ -171,29 +171,38 @@ texinfo_documents = [
|
||||
'Database'),
|
||||
]
|
||||
|
||||
repo = "https://raw.githubusercontent.com/tendermint/tools/"
|
||||
branch = "master"
|
||||
# ---- customization -------------------------
|
||||
|
||||
tools = "./tools"
|
||||
assets = tools + "/assets"
|
||||
tools_repo = "https://raw.githubusercontent.com/tendermint/tools/"
|
||||
tools_branch = "master"
|
||||
|
||||
if os.path.isdir(tools) != True:
|
||||
os.mkdir(tools)
|
||||
if os.path.isdir(assets) != True:
|
||||
os.mkdir(assets)
|
||||
tools_dir = "./tools"
|
||||
assets_dir = tools_dir + "/assets"
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/ansible/README.rst', filename=tools+'/ansible.rst')
|
||||
urllib.urlretrieve(repo+branch+'/ansible/assets/a_plus_t.png', filename=assets+'/a_plus_t.png')
|
||||
if os.path.isdir(tools_dir) != True:
|
||||
os.mkdir(tools_dir)
|
||||
if os.path.isdir(assets_dir) != True:
|
||||
os.mkdir(assets_dir)
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/docker/README.rst', filename=tools+'/docker.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png')
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/README.rst', filename=tools+'/mintnet-kubernetes.rst')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets+'/gce1.png')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets+'/gce2.png')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets+'/statefulset.png')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets+'/t_plus_k.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst')
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/terraform-digitalocean/README.rst', filename=tools+'/terraform-digitalocean.rst')
|
||||
urllib.urlretrieve(repo+branch+'/tm-bench/README.rst', filename=tools+'/benchmarking-and-monitoring.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets_dir+'/gce1.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets_dir+'/gce2.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking-and-monitoring.rst')
|
||||
# the readme for below is included in tm-bench
|
||||
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')
|
||||
|
||||
#### abci spec #################################
|
||||
|
||||
abci_repo = "https://raw.githubusercontent.com/tendermint/abci/"
|
||||
abci_branch = "spec-docs"
|
||||
|
||||
urllib.urlretrieve(abci_repo+abci_branch+'/specification.rst', filename='abci-spec.rst')
|
||||
|
@ -13,7 +13,7 @@ It's relatively easy to setup a Tendermint cluster manually. The only
requirements for a particular Tendermint node are a private key for the
validator, stored as ``priv_validator.json``, and a list of the public
keys of all validators, stored as ``genesis.json``. These files should
be stored in ``~/.tendermint``, or wherever the ``$TMHOME`` variable
be stored in ``~/.tendermint/config``, or wherever the ``$TMHOME`` variable
might be set to.

Here are the steps to setting up a testnet manually:
@ -24,13 +24,13 @@ Here are the steps to setting up a testnet manually:
``tendermint gen_validator``
4) Compile a list of public keys for each validator into a
``genesis.json`` file.
5) Run ``tendermint node --p2p.seeds=< seed addresses >`` on each node,
where ``< seed addresses >`` is a comma separated list of the IP:PORT
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
where ``< peer addresses >`` is a comma separated list of the IP:PORT
combination for each node. The default port for Tendermint is
``46656``. Thus, if the IP addresses of your nodes were
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
would look like:
``tendermint node --p2p.seeds=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``.
``tendermint node --p2p.persistent_peers=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``.

After a few seconds, all the nodes should connect to each other and start
making blocks! For more information, see the Tendermint Networks section
docs/examples/getting-started.md (new file, 139)
@ -0,0 +1,139 @@
|
||||
# Tendermint
|
||||
|
||||
## Overview
|
||||
|
||||
This is a quick start guide. If you have a vague idea about how Tendermint works
|
||||
and want to get started right away, continue. Otherwise, [review the documentation](http://tendermint.readthedocs.io/en/master/)
|
||||
|
||||
## Install
|
||||
|
||||
### Quick Install
|
||||
|
||||
On a fresh Ubuntu 16.04 machine, installation can be done with [this script](https://git.io/vNLfY), like so:
|
||||
|
||||
```
|
||||
curl -L https://git.io/vNLfY | bash
|
||||
source ~/.profile
|
||||
```
|
||||
|
||||
WARNING: do not run the above on your local machine.
|
||||
|
||||
The script is also used to facilitate cluster deployment below.
|
||||
|
||||
### Manual Install
|
||||
|
||||
Requires:
|
||||
- `go` minimum version 1.9.2
|
||||
- `$GOPATH` set and `$GOPATH/bin` on your $PATH (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
|
||||
|
||||
To install Tendermint, run:
|
||||
|
||||
```
|
||||
go get github.com/tendermint/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
make get_vendor_deps
|
||||
make install
|
||||
```
|
||||
|
||||
Confirm installation:
|
||||
|
||||
```
|
||||
$ tendermint version
|
||||
0.15.0-381fe19
|
||||
```
|
||||
|
||||
## Initialization
|
||||
|
||||
Running:
|
||||
|
||||
```
|
||||
tendermint init
|
||||
```
|
||||
|
||||
will create the required files for a single, local node.
|
||||
|
||||
These files are found in `$HOME/.tendermint`:
|
||||
|
||||
```
|
||||
$ ls $HOME/.tendermint
|
||||
|
||||
config.toml data genesis.json priv_validator.json
|
||||
```
|
||||
|
||||
For a single, local node, no further configuration is required.
|
||||
Configuring a cluster is covered further below.
|
||||
|
||||
## Local Node
|
||||
|
||||
Start tendermint with a simple in-process application:
|
||||
|
||||
```
|
||||
tendermint node --proxy_app=dummy
|
||||
```
|
||||
|
||||
and blocks will start to stream in:
|
||||
|
||||
```
|
||||
I[01-06|01:45:15.592] Executed block module=state height=1 validTxs=0 invalidTxs=0
|
||||
I[01-06|01:45:15.624] Committed state module=state height=1 txs=0 appHash=
|
||||
```
|
||||
|
||||
Check the status with:
|
||||
|
||||
```
|
||||
curl -s localhost:46657/status
|
||||
```
|
||||
|
||||
### Sending Transactions
|
||||
|
||||
With the dummy app running, we can send transactions:
|
||||
|
||||
```
|
||||
curl -s 'localhost:46657/broadcast_tx_commit?tx="abcd"'
|
||||
```
|
||||
|
||||
and check that it worked with:
|
||||
|
||||
```
|
||||
curl -s 'localhost:46657/abci_query?data="abcd"'
|
||||
```
|
||||
|
||||
We can send transactions with a key:value store:
|
||||
|
||||
```
|
||||
curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"'
|
||||
```
|
||||
|
||||
and query the key:
|
||||
|
||||
```
|
||||
curl -s 'localhost:46657/abci_query?data="name"'
|
||||
```
|
||||
|
||||
where the value is returned in hex.
|
||||
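For example, if the stored value were the ASCII string `satoshi`, the returned hex `7361746f736869` could be decoded on the command line:

```
echo 7361746f736869 | xxd -r -p
```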
|
||||
## Cluster of Nodes
|
||||
|
||||
First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4.
|
||||
|
||||
Then `ssh` into each machine, and download and execute [this script](https://git.io/vNLfY):
|
||||
|
||||
```
|
||||
curl -L https://git.io/vNLfY | bash
|
||||
source ~/.profile
|
||||
```
|
||||
|
||||
This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary.
|
||||
|
||||
Next, `cd` into `docs/examples`. Run each command below on its respective node, in sequence:
|
||||
|
||||
```
|
||||
tendermint node --home ./node1 --proxy_app=dummy
|
||||
tendermint node --home ./node2 --proxy_app=dummy --p2p.seeds IP1:46656
|
||||
tendermint node --home ./node3 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656
|
||||
tendermint node --home ./node4 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656,IP3:46656
|
||||
```
|
||||
|
||||
Note that after the third node is started, blocks will start to stream in because >2/3 of the validators (defined in the `genesis.json`) have come online. Seeds can also be specified in the `config.toml`. See [this PR](https://github.com/tendermint/tendermint/pull/792) for more information about configuration options.
|
||||
|
||||
Transactions can then be sent as covered in the single, local node example above.
|
docs/examples/install_tendermint.sh (new file, 32)
@ -0,0 +1,32 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# XXX: this script is meant to be used only on a fresh Ubuntu 16.04 instance
|
||||
# and has only been tested on Digital Ocean
|
||||
|
||||
# get and unpack golang
|
||||
curl -O https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
|
||||
tar -xvf go1.9.2.linux-amd64.tar.gz
|
||||
|
||||
apt install make
|
||||
|
||||
## move go and add binary to path
|
||||
mv go /usr/local
|
||||
echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile
|
||||
|
||||
## create the GOPATH directory, set GOPATH and put on PATH
|
||||
mkdir goApps
|
||||
echo "export GOPATH=/root/goApps" >> ~/.profile
|
||||
echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile
|
||||
|
||||
source ~/.profile
|
||||
|
||||
## get the code and move into it
|
||||
REPO=github.com/tendermint/tendermint
|
||||
go get $REPO
|
||||
cd $GOPATH/src/$REPO
|
||||
|
||||
## build
|
||||
git checkout v0.15.0
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
make install
|
docs/examples/node1/config.toml (new file, 15)
@ -0,0 +1,15 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
docs/examples/node1/genesis.json (new file, 42)
@ -0,0 +1,42 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
docs/examples/node1/priv_validator.json (new file, 15)
@ -0,0 +1,15 @@
|
||||
{
|
||||
"address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F",
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"last_height":0,
|
||||
"last_round":0,
|
||||
"last_step":0,
|
||||
"last_signature":null,
|
||||
"priv_key":{
|
||||
"type":"ed25519",
|
||||
"data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
}
|
||||
}
|
docs/examples/node2/config.toml (new file, 15)
@ -0,0 +1,15 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
docs/examples/node2/genesis.json (new file, 42)
@ -0,0 +1,42 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
docs/examples/node2/priv_validator.json (new file, 15)
@ -0,0 +1,15 @@
|
||||
{
|
||||
"address": "DD6C63A762608A9DDD4A845657743777F63121D6",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
}
|
||||
}
|
docs/examples/node3/config.toml (new file, 15)
@ -0,0 +1,15 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
docs/examples/node3/genesis.json (new file, 42)
@ -0,0 +1,42 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
docs/examples/node3/priv_validator.json (new file, 15)
@ -0,0 +1,15 @@
|
||||
{
|
||||
"address": "6D6A1E313B407B5474106CA8759C976B777AB659",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "622432A370111A5C25CFE121E163FE709C9D5C95F551EDBD7A2C69A8545C9B76E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
}
|
||||
}
|
docs/examples/node4/config.toml (new file, 15)
@ -0,0 +1,15 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
docs/examples/node4/genesis.json (new file, 42)
@ -0,0 +1,42 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
docs/examples/node4/priv_validator.json (new file, 15)
@ -0,0 +1,15 @@
|
||||
{
|
||||
"address": "829A9663611D3DD88A3D84EA0249679D650A0755",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "0A604D1C9AE94A50150BF39E603239092F9392E4773F4D8F4AC1D86E6438E89E2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
}
|
||||
}
|
@ -53,6 +53,7 @@ Tendermint 102
:maxdepth: 2

abci-cli.rst
abci-spec.rst
app-architecture.rst
app-development.rst
how-to-read-logs.rst
@ -1,58 +1,173 @@
|
||||
Configuration
|
||||
=============
|
||||
|
||||
TendermintCore can be configured via a TOML file in
|
||||
``$TMHOME/config.toml``. Some of these parameters can be overridden by
|
||||
command-line flags.
|
||||
Tendermint Core can be configured via a TOML file in
|
||||
``$TMHOME/config/config.toml``. Some of these parameters can be overridden by
|
||||
command-line flags. For most users, the options under ``##### main
base configuration options #####`` are the ones intended to be modified, while
the options further below are intended for advanced power users.
|
||||
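For instance, a few of these can be overridden directly on the command line when starting the node (a sketch using flags that appear elsewhere in this commit):

::

    tendermint node \
      --proxy_app=dummy \
      --p2p.laddr=tcp://0.0.0.0:46656 \
      --p2p.persistent_peers=203.0.113.20:46656 \
      --log_level=error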
|
||||
Config parameters
|
||||
~~~~~~~~~~~~~~~~~
|
||||
Config options
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
The main config parameters are defined
|
||||
`here <https://github.com/tendermint/tendermint/blob/master/config/config.go>`__.
|
||||
The default configuration file created by ``tendermint init`` has all
the parameters set to their default values. It will look something
like the file below; however, double-check by inspecting the
``config.toml`` created with your version of ``tendermint`` installed:
|
||||
|
||||
- ``abci``: ABCI transport (socket \| grpc). *Default*: ``socket``
|
||||
- ``db_backend``: Database backend for the blockchain and
|
||||
TendermintCore state. ``leveldb`` or ``memdb``. *Default*:
|
||||
``"leveldb"``
|
||||
- ``db_dir``: Database dir. *Default*: ``"$TMHOME/data"``
|
||||
- ``fast_sync``: Whether to sync faster from the block pool. *Default*:
|
||||
``true``
|
||||
- ``genesis_file``: The location of the genesis file. *Default*:
|
||||
``"$TMHOME/genesis.json"``
|
||||
- ``log_level``: *Default*: ``"state:info,*:error"``
|
||||
- ``moniker``: Name of this node. *Default*: the host name or ``"anonymous"``
|
||||
if runtime fails to get the host name
|
||||
- ``priv_validator_file``: Validator private key file. *Default*:
|
||||
``"$TMHOME/priv_validator.json"``
|
||||
- ``prof_laddr``: Profile listen address. *Default*: ``""``
|
||||
- ``proxy_app``: The ABCI app endpoint. *Default*:
|
||||
``"tcp://127.0.0.1:46658"``
|
||||
::
|
||||
|
||||
- ``consensus.max_block_size_txs``: Maximum number of block txs.
|
||||
*Default*: ``10000``
|
||||
- ``consensus.create_empty_blocks``: Create empty blocks w/o txs.
|
||||
*Default*: ``true``
|
||||
- ``consensus.create_empty_blocks_interval``: Block creation interval, even if empty.
|
||||
- ``consensus.timeout_*``: Various consensus timeout parameters
|
||||
- ``consensus.wal_file``: Consensus state WAL. *Default*:
|
||||
``"$TMHOME/data/cs.wal/wal"``
|
||||
- ``consensus.wal_light``: Whether to use light-mode for Consensus
|
||||
state WAL. *Default*: ``false``
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
- ``mempool.*``: Various mempool parameters
|
||||
##### main base config options #####
|
||||
|
||||
- ``p2p.addr_book_file``: Peer address book. *Default*:
|
||||
``"$TMHOME/addrbook.json"``. **NOT USED**
|
||||
- ``p2p.laddr``: Node listen address. (0.0.0.0:0 means any interface,
|
||||
any port). *Default*: ``"0.0.0.0:46656"``
|
||||
- ``p2p.pex``: Enable Peer-Exchange (dev feature). *Default*: ``false``
|
||||
- ``p2p.seeds``: Comma delimited host:port seed nodes. *Default*:
|
||||
``""``
|
||||
- ``p2p.skip_upnp``: Skip UPNP detection. *Default*: ``false``
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
- ``rpc.grpc_laddr``: GRPC listen address (BroadcastTx only). Port
|
||||
required. *Default*: ``""``
|
||||
- ``rpc.laddr``: RPC listen address. Port required. *Default*:
|
||||
``"0.0.0.0:46657"``
|
||||
- ``rpc.unsafe``: Enabled unsafe rpc methods. *Default*: ``true``
|
||||
# A custom human readable name for this node
|
||||
moniker = "anonymous"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# The ID of the chain to join (should be signed with every transaction and vote)
|
||||
chain_id = ""
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "priv_validator.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_msg_packet_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
wal_light = false
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "{{ .TxIndex.Indexer }}"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = "{{ .TxIndex.IndexTags }}"
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
|
||||
# desirable (see the comment above). IndexTags takes precedence over
|
||||
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = {{ .TxIndex.IndexAllTags }}
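The options above correspond to fields on the ``Config`` struct in ``config/config.go``. As a rough sketch (assuming the ``config`` package API of this release; the root path and values below are made up for illustration), the same settings can also be applied programmatically:

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Start from the built-in defaults, then override selected fields,
	// mirroring what editing config.toml would do.
	c := cfg.DefaultConfig()
	c.SetRoot("/home/user/.tendermint") // hypothetical $TMHOME
	c.Moniker = "my-node"
	c.P2P.PersistentPeers = "10.11.12.13:46656" // comma separated list

	// Paths are resolved relative to the root set above.
	fmt.Println(c.GenesisFile())
}
```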
|
||||
|
@ -1,7 +1,7 @@
|
||||
Genesis
|
||||
=======
|
||||
|
||||
The genesis.json file in ``$TMHOME`` defines the initial TendermintCore
|
||||
The genesis.json file in ``$TMHOME/config`` defines the initial Tendermint Core
|
||||
state upon genesis of the blockchain (`see
|
||||
definition <https://github.com/tendermint/tendermint/blob/master/types/genesis.go>`__).
|
||||
|
||||
|
@ -9,6 +9,7 @@ It contains the following components:
|
||||
- [Encoding and Digests](encoding.md)
|
||||
- [Blockchain](blockchain.md)
|
||||
- [State](state.md)
|
||||
- [P2P](p2p/node.md)
|
||||
|
||||
## Overview
|
||||
|
||||
|
@ -6,14 +6,14 @@ Here we describe configuration options around the Peer Exchange.
|
||||
|
||||
`--p2p.seed_mode`
|
||||
|
||||
The node operates in seed mode. It will kick incoming peers after sharing some peers.
|
||||
It will continually crawl the network for peers.
|
||||
The node operates in seed mode. In seed mode, a node continuously crawls the network for peers,
|
||||
and upon incoming connection shares some peers and disconnects.
|
||||
|
||||
## Seeds
|
||||
|
||||
`--p2p.seeds "1.2.3.4:46656,2.3.4.5:4444"`
|
||||
|
||||
Dials these seeds when we need more peers. They will return a list of peers and then disconnect.
|
||||
Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
|
||||
If we already have enough peers in the address book, we may never need to dial them.
|
||||
|
||||
## Persistent Peers
|
||||
@ -27,7 +27,7 @@ anchor us in the p2p network.
|
||||
Note that the auto-redial uses exponential backoff and will give up
|
||||
after a day of trying to connect.
|
||||
|
||||
NOTE: If `dial_seeds` and `persistent_peers` intersect,
|
||||
NOTE: If `seeds` and `persistent_peers` intersect,
|
||||
the user will be WARNED that seeds may auto-close connections
|
||||
and the node may not be able to keep the connection persistent.
|
||||
|
@ -1,12 +1,14 @@
|
||||
## P2P Multiplex Connection
|
||||
|
||||
...
|
||||
|
||||
## MConnection
|
||||
|
||||
`MConnection` is a multiplex connection:
|
||||
|
||||
__multiplex__ *noun* a system or signal involving simultaneous transmission of
|
||||
several messages along a single channel of communication.
|
||||
|
||||
Each `MConnection` handles message transmission on multiple abstract communication
|
||||
`Channel`s. Each channel has a globally unique byte id.
|
||||
`MConnection` is a multiplex connection that supports multiple independent streams
|
||||
with distinct quality of service guarantees atop a single TCP connection.
|
||||
Each stream is known as a `Channel` and each `Channel` has a globally unique byte id.
|
||||
Each `Channel` also has a relative priority that determines the quality of service
|
||||
of the `Channel` in comparison to the others.
|
||||
The byte id and the relative priorities of each `Channel` are configured upon
|
||||
initialization of the connection.
|
||||
|
||||
@ -14,12 +16,13 @@ The `MConnection` supports three packet types: Ping, Pong, and Msg.
|
||||
|
||||
### Ping and Pong
|
||||
|
||||
The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively
|
||||
The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively.
|
||||
|
||||
When we haven't received any messages on an `MConnection` in a time `pingTimeout`, we send a ping message.
|
||||
When a ping is received on the `MConnection`, a pong is sent in response.
|
||||
When a ping is received on the `MConnection`, a pong is sent in response only if there are no other messages
|
||||
to send and the peer has not sent us too many pings.
|
||||
|
||||
If a pong is not received in sufficient time, the peer's score should be decremented (TODO).
|
||||
If a pong or message is not received in sufficient time after a ping, disconnect from the peer.
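A minimal sketch of those rules (the constants and helper name are illustrative, not the actual `MConnection` fields):

```go
package main

import (
	"fmt"
	"time"
)

const (
	pktPing byte = 0x1
	pktPong byte = 0x2

	pingTimeout = 40 * time.Second // send a ping after this much silence
	pongTimeout = 45 * time.Second // disconnect if nothing arrives after a ping
)

// shouldPong mirrors the rule above: only answer a ping when there is nothing
// else queued to send and the peer isn't flooding us with pings.
func shouldPong(pkt byte, sendQueueEmpty bool, recentPings, maxPings int) bool {
	return pkt == pktPing && sendQueueEmpty && recentPings <= maxPings
}

func main() {
	fmt.Println(shouldPong(pktPing, true, 1, 3))  // true: quiet queue, few pings
	fmt.Println(shouldPong(pktPing, false, 1, 3)) // false: other messages pending
	_, _ = pingTimeout, pongTimeout               // would drive the real timers
}
```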
|
||||
|
||||
### Msg
|
||||
|
||||
@ -57,8 +60,8 @@ func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
|
||||
for the channel with the given id byte `chID`. The message `msg` is serialized
|
||||
using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
|
||||
|
||||
`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's
|
||||
queue is full.
|
||||
`TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel
|
||||
with the given id byte chID if the queue is not full; otherwise it returns false immediately.
|
||||
|
||||
`Send()` and `TrySend()` are also exposed for each `Peer`.
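For example, sending to a single peer rather than broadcasting might look like this (purely illustrative; `peer` is assumed to come from the Switch and channel id `0x20` is made up):

```go
// Queue a message on channel 0x20 of one peer.
if peer.Send(0x20, "sample message") {
	// message was queued; delivery is ordered within the channel
}
if !peer.TrySend(0x20, "another message") {
	// channel 0x20's queue is full; drop or retry later
}
```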
|
||||
|
||||
@ -103,14 +106,3 @@ for _, peer := range switch.Peers().List() {
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### PexReactor/AddrBook
|
||||
|
||||
A `PEXReactor` reactor implementation is provided to automate peer discovery.
|
||||
|
||||
```go
|
||||
book := p2p.NewAddrBook(addrBookFilePath)
|
||||
pexReactor := p2p.NewPEXReactor(book)
|
||||
...
|
||||
switch := NewSwitch([]Reactor{pexReactor, myReactor, ...})
|
||||
```
|
@ -39,8 +39,10 @@ A node checks its address book on startup and attempts to connect to peers from
|
||||
If it can't connect to any peers after some time, it falls back to the seeds to find more.
|
||||
|
||||
Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up
|
||||
to the latest state of the blockchain, assuming they aren't too far behind.
|
||||
If they are too far behind, they may need to validate a recent `H` and `HASH` out-of-band again.
|
||||
to the latest state of the blockchain from wherever they were last.
|
||||
In a Proof-of-Stake context, if they are sufficiently far behind (greater than the length
|
||||
of the unbonding period), they will need to validate a recent `H` and `HASH` out-of-band again
|
||||
so they know they have synced the correct chain.
|
||||
|
||||
## Validator Node
|
||||
|
||||
@ -54,6 +56,7 @@ Validators that know and trust each other can accept incoming connections from o
|
||||
## Sentry Node
|
||||
|
||||
Sentry nodes are guardians of a validator node and provide it access to the rest of the network.
|
||||
They should be well connected to other full nodes on the network.
|
||||
Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other.
|
||||
They should always expect to have direct incoming connections from the validator node and its backup/s.
|
||||
They do not report the validator node's address in the PEX.
|
@ -5,15 +5,11 @@ and how other peers are found.
|
||||
|
||||
## Peer Identity
|
||||
|
||||
Tendermint peers are expected to maintain long-term persistent identities in the form of a private key.
|
||||
Each peer has an ID defined as `peer.ID == peer.PrivKey.Address()`, where `Address` uses the scheme defined in go-crypto.
|
||||
|
||||
Peer ID's must come with some Proof-of-Work; that is,
|
||||
they must satisfy `peer.PrivKey.Address() < target` for some difficulty target.
|
||||
This ensures they are not too easy to generate. To begin, let `target == 2^240`.
|
||||
Tendermint peers are expected to maintain long-term persistent identities in the form of a public key.
|
||||
Each peer has an ID defined as `peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme defined in go-crypto.
|
||||
|
||||
A single peer ID can have multiple IP addresses associated with it.
|
||||
For simplicity, we only keep track of the latest one.
|
||||
TODO: define how to deal with this.
|
||||
|
||||
When attempting to connect to a peer, we use the PeerURL: `<ID>@<IP>:<PORT>`.
|
||||
We will attempt to connect to the peer at IP:PORT, and verify,
|
||||
@ -22,7 +18,7 @@ corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer lay
|
||||
|
||||
Peers can also be connected to without specifying an ID, ie. just `<IP>:<PORT>`.
|
||||
In this case, the peer must be authenticated out-of-band of Tendermint,
|
||||
for instance via VPN
|
||||
for instance via VPN.
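A toy sketch of splitting such a PeerURL (purely illustrative; the real dialing and verification code lives in the p2p package):

```go
package main

import (
	"fmt"
	"strings"
)

// splitPeerURL splits "<ID>@<IP>:<PORT>" into the expected peer ID and the
// network address. An address without "@" carries no ID and must be
// authenticated out-of-band.
func splitPeerURL(u string) (id, addr string) {
	if i := strings.Index(u, "@"); i >= 0 {
		return u[:i], u[i+1:]
	}
	return "", u
}

func main() {
	id, addr := splitPeerURL("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@1.2.3.4:46656")
	fmt.Println(id, addr)
}
```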
|
||||
|
||||
## Connections
|
||||
|
||||
@ -49,8 +45,8 @@ It goes as follows:
|
||||
- if we had the smaller ephemeral pubkey, use nonce1 for receiving, nonce2 for sending;
|
||||
else the opposite
|
||||
- all communications from now on are encrypted using the shared secret and the nonces, where each nonce
|
||||
- we now have an encrypted channel, but still need to authenticate
|
||||
increments by 2 every time it is used
|
||||
- we now have an encrypted channel, but still need to authenticate
|
||||
- generate a common challenge to sign:
|
||||
- SHA256 of the sorted (lowest first) and concatenated ephemeral pub keys
|
||||
- sign the common challenge with our persistent private key
|
||||
@ -76,7 +72,7 @@ an existing peer. If so, we disconnect.
|
||||
|
||||
We also check the peer's address and public key against
|
||||
an optional whitelist which can be managed through the ABCI app -
|
||||
if the whitelist is enabled and the peer does not qualigy, the connection is
|
||||
if the whitelist is enabled and the peer does not qualify, the connection is
|
||||
terminated.
|
||||
|
||||
|
||||
@ -86,14 +82,14 @@ The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
|
||||
|
||||
```
|
||||
type NodeInfo struct {
|
||||
PubKey crypto.PubKey `json:"pub_key"`
|
||||
Moniker string `json:"moniker"`
|
||||
Network string `json:"network"`
|
||||
RemoteAddr string `json:"remote_addr"`
|
||||
ListenAddr string `json:"listen_addr"` // accepting in
|
||||
Version string `json:"version"` // major.minor.revision
|
||||
Channels []int8 `json:"channels"` // active reactor channels
|
||||
Other []string `json:"other"` // other application specific data
|
||||
PubKey crypto.PubKey
|
||||
Moniker string
|
||||
Network string
|
||||
RemoteAddr string
|
||||
ListenAddr string
|
||||
Version string
|
||||
Channels []int8
|
||||
Other []string
|
||||
}
|
||||
```
|
||||
|
@ -18,7 +18,7 @@ Configuration
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Set the ``laddr`` config parameter under ``[rpc]`` table in the
|
||||
$TMHOME/config.toml file or the ``--rpc.laddr`` command-line flag to the
|
||||
$TMHOME/config/config.toml file or the ``--rpc.laddr`` command-line flag to the
|
||||
desired protocol://host:port setting. Default: ``tcp://0.0.0.0:46657``.
|
||||
|
||||
Arguments
|
||||
@ -112,6 +112,7 @@ An HTTP Get request to the root RPC endpoint (e.g.
|
||||
http://localhost:46657/broadcast_tx_sync?tx=_
|
||||
http://localhost:46657/commit?height=_
|
||||
http://localhost:46657/dial_seeds?seeds=_
|
||||
http://localhost:46657/dial_peers?peers=_&persistent=_
|
||||
http://localhost:46657/subscribe?event=_
|
||||
http://localhost:46657/tx?hash=_&prove=_
|
||||
http://localhost:46657/unsafe_start_cpu_profiler?filename=_
|
||||
|
@ -24,7 +24,8 @@ Initialize the root directory by running:
|
||||
tendermint init
|
||||
|
||||
This will create a new private key (``priv_validator.json``), and a
|
||||
genesis file (``genesis.json``) containing the associated public key.
|
||||
genesis file (``genesis.json``) containing the associated public key,
|
||||
in ``$TMHOME/config``.
|
||||
This is all that's necessary to run a local testnet with one validator.
|
||||
|
||||
For more elaborate initialization, see our `testnet deployment
|
||||
@ -127,10 +128,14 @@ Some fields from the config file can be overwritten with flags.
|
||||
No Empty Blocks
|
||||
---------------
|
||||
|
||||
This much requested feature was implemented in version 0.10.3. While the default behaviour of ``tendermint`` is still to create blocks approximately once per second, it is possible to disable empty blocks or set a block creation interval. In the former case, blocks will be created when there are new transactions or when the AppHash changes.
|
||||
This much requested feature was implemented in version 0.10.3. While the
|
||||
default behaviour of ``tendermint`` is still to create blocks approximately
|
||||
once per second, it is possible to disable empty blocks or set a block creation
|
||||
interval. In the former case, blocks will be created when there are new
|
||||
transactions or when the AppHash changes.
|
||||
|
||||
To configure tendermint to not produce empty blocks unless there are txs or the app hash changes,
|
||||
run tendermint with this additional flag:
|
||||
To configure tendermint to not produce empty blocks unless there are
|
||||
transactions or the app hash changes, run tendermint with this additional flag:
|
||||
|
||||
::
|
||||
|
||||
@ -153,8 +158,7 @@ The block interval setting allows for a delay (in seconds) between the creation
|
||||
create_empty_blocks_interval = 5
|
||||
|
||||
With this setting, empty blocks will be produced every 5s if no block has otherwise been produced,
|
||||
regardless of the value of `create_empty_blocks`.
|
||||
|
||||
regardless of the value of ``create_empty_blocks``.
|
||||
|
||||
Broadcast API
|
||||
-------------
|
||||
@ -196,7 +200,7 @@ Tendermint Networks
|
||||
-------------------
|
||||
|
||||
When ``tendermint init`` is run, both a ``genesis.json`` and
|
||||
``priv_validator.json`` are created in ``~/.tendermint``. The
|
||||
``priv_validator.json`` are created in ``~/.tendermint/config``. The
|
||||
``genesis.json`` might look like:
|
||||
|
||||
::
|
||||
@ -246,13 +250,17 @@ conflicting messages.
|
||||
Note also that the ``pub_key`` (the public key) in the
|
||||
``priv_validator.json`` is also present in the ``genesis.json``.
|
||||
|
||||
The genesis file contains the list of public keys which may participate
|
||||
in the consensus, and their corresponding voting power. Greater than 2/3
|
||||
of the voting power must be active (ie. the corresponding private keys
|
||||
must be producing signatures) for the consensus to make progress. In our
|
||||
case, the genesis file contains the public key of our
|
||||
``priv_validator.json``, so a tendermint node started with the default
|
||||
root directory will be able to make new blocks, as we've already seen.
|
||||
The genesis file contains the list of public keys which may participate in the
|
||||
consensus, and their corresponding voting power. Greater than 2/3 of the voting
|
||||
power must be active (ie. the corresponding private keys must be producing
|
||||
signatures) for the consensus to make progress. In our case, the genesis file
|
||||
contains the public key of our ``priv_validator.json``, so a tendermint node
|
||||
started with the default root directory will be able to make progress. Voting
|
||||
power uses an `int64` but must be positive, thus the range is: 0 through
|
||||
9223372036854775807. Because of how the current proposer selection algorithm works,
|
||||
we do not recommend having voting powers greater than 10^12 (ie. 1 trillion)
|
||||
(see `Proposals section of Byzantine Consensus Algorithm
|
||||
<./specification/byzantine-consensus-algorithm.html#proposals>`__ for details).
|
||||
|
||||
If we want to add more nodes to the network, we have two choices: we can
|
||||
add a new validator node, who will also participate in the consensus by
|
||||
@ -263,8 +271,10 @@ with the consensus protocol.
|
||||
Peers
|
||||
~~~~~
|
||||
|
||||
To connect to peers on start-up, specify them in the ``config.toml`` or
|
||||
on the command line.
|
||||
To connect to peers on start-up, specify them in the ``$TMHOME/config/config.toml`` or
|
||||
on the command line. Use ``seeds`` to specify seed nodes from which you can get many other
|
||||
peer addresses, and ``persistent_peers`` to specify peers that your node will maintain
|
||||
persistent connections with.
|
||||
|
||||
For instance,
|
||||
|
||||
@ -273,26 +283,35 @@ For instance,
|
||||
tendermint node --p2p.seeds "1.2.3.4:46656,5.6.7.8:46656"
|
||||
|
||||
Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to
|
||||
specify peers for a running node to connect to:
|
||||
specify seeds for a running node to connect to:
|
||||
|
||||
::
|
||||
|
||||
curl --data-urlencode "seeds=[\"1.2.3.4:46656\",\"5.6.7.8:46656\"]" localhost:46657/dial_seeds
|
||||
curl 'localhost:46657/dial_seeds?seeds=\["1.2.3.4:46656","5.6.7.8:46656"\]'
|
||||
|
||||
Additionally, the peer-exchange protocol can be enabled using the
|
||||
``--pex`` flag, though this feature is `still under
|
||||
development <https://github.com/tendermint/tendermint/issues/598>`__. If
|
||||
``--pex`` is enabled, peers will gossip about known peers and form a
|
||||
more resilient network.
|
||||
Note, if the peer-exchange protocol (PEX) is enabled (default), you should not
|
||||
normally need seeds after the first start. Peers will be gossipping about known
|
||||
peers and forming a network, storing peer addresses in the addrbook.
|
||||
|
||||
If you want Tendermint to connect to a specific set of addresses and maintain a
|
||||
persistent connection with each, you can use the ``--p2p.persistent_peers``
|
||||
flag or the corresponding setting in the ``config.toml`` or the
|
||||
``/dial_peers`` RPC endpoint to do it without stopping the Tendermint
|
||||
Core instance.
|
||||
|
||||
::
|
||||
|
||||
tendermint node --p2p.persistent_peers "10.11.12.13:46656,10.11.12.14:46656"
|
||||
curl 'localhost:46657/dial_peers?persistent=true&peers=\["1.2.3.4:46656","5.6.7.8:46656"\]'
|
||||
|
||||
Adding a Non-Validator
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Adding a non-validator is simple. Just copy the original
|
||||
``genesis.json`` to ``~/.tendermint`` on the new machine and start the
|
||||
node, specifying seeds as necessary. If no seeds are specified, the node
|
||||
won't make any blocks, because it's not a validator, and it won't hear
|
||||
about any blocks, because it's not connected to the other peer.
|
||||
``genesis.json`` to ``~/.tendermint/config`` on the new machine and start the
|
||||
node, specifying seeds or persistent peers as necessary. If no seeds or persistent
|
||||
peers are specified, the node won't make any blocks, because it's not a validator,
|
||||
and it won't hear about any blocks, because it's not connected to the other peer.
|
||||
|
||||
Adding a Validator
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
@ -358,12 +377,12 @@ then the new ``genesis.json`` will be:
|
||||
]
|
||||
}
|
||||
|
||||
Update the ``genesis.json`` in ``~/.tendermint``. Copy the genesis file
|
||||
and the new ``priv_validator.json`` to the ``~/.tendermint`` on a new
|
||||
Update the ``genesis.json`` in ``~/.tendermint/config``. Copy the genesis file
|
||||
and the new ``priv_validator.json`` to the ``~/.tendermint/config`` on a new
|
||||
machine.
|
||||
|
||||
Now run ``tendermint node`` on both machines, and use either
|
||||
``--p2p.seeds`` or the ``/dial_seeds`` to get them to peer up. They
|
||||
``--p2p.persistent_peers`` or the ``/dial_peers`` to get them to peer up. They
|
||||
should start making blocks, and will only continue to do so as long as
|
||||
both of them are online.
|
||||
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
liteErr "github.com/tendermint/tendermint/lite/errors"
|
||||
rpcclient "github.com/tendermint/tendermint/rpc/client"
|
||||
rpctest "github.com/tendermint/tendermint/rpc/test"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func TestProvider(t *testing.T) {
|
||||
@ -17,7 +18,8 @@ func TestProvider(t *testing.T) {
|
||||
|
||||
cfg := rpctest.GetConfig()
|
||||
rpcAddr := cfg.RPC.ListenAddress
|
||||
chainID := cfg.ChainID
|
||||
genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile())
|
||||
chainID := genDoc.ChainID
|
||||
p := NewHTTPProvider(rpcAddr)
|
||||
require.NotNil(t, p)
|
||||
|
||||
@ -35,7 +37,7 @@ func TestProvider(t *testing.T) {
|
||||
|
||||
// let's check this is valid somehow
|
||||
assert.Nil(seed.ValidateBasic(chainID))
|
||||
cert := lite.NewStatic(chainID, seed.Validators)
|
||||
cert := lite.NewStaticCertifier(chainID, seed.Validators)
|
||||
|
||||
// historical queries now work :)
|
||||
lower := sh - 5
|
||||
|
@ -6,9 +6,9 @@ import (
|
||||
liteErr "github.com/tendermint/tendermint/lite/errors"
|
||||
)
|
||||
|
||||
var _ Certifier = &Dynamic{}
|
||||
var _ Certifier = (*DynamicCertifier)(nil)
|
||||
|
||||
// Dynamic uses a Static for Certify, but adds an
|
||||
// DynamicCertifier uses a StaticCertifier for Certify, but adds an
|
||||
// Update method to allow for a change of validators.
|
||||
//
|
||||
// You can pass in a FullCommit with another validator set,
|
||||
@ -17,46 +17,48 @@ var _ Certifier = &Dynamic{}
|
||||
// validator set for the next Certify call.
|
||||
// For security, it will only follow validator set changes
|
||||
// going forward.
|
||||
type Dynamic struct {
|
||||
cert *Static
|
||||
type DynamicCertifier struct {
|
||||
cert *StaticCertifier
|
||||
lastHeight int64
|
||||
}
|
||||
|
||||
// NewDynamic returns a new dynamic certifier.
|
||||
func NewDynamic(chainID string, vals *types.ValidatorSet, height int64) *Dynamic {
|
||||
return &Dynamic{
|
||||
cert: NewStatic(chainID, vals),
|
||||
func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier {
|
||||
return &DynamicCertifier{
|
||||
cert: NewStaticCertifier(chainID, vals),
|
||||
lastHeight: height,
|
||||
}
|
||||
}
|
||||
|
||||
// ChainID returns the chain id of this certifier.
|
||||
func (c *Dynamic) ChainID() string {
|
||||
return c.cert.ChainID()
|
||||
// Implements Certifier.
|
||||
func (dc *DynamicCertifier) ChainID() string {
|
||||
return dc.cert.ChainID()
|
||||
}
|
||||
|
||||
// Validators returns the validators of this certifier.
|
||||
func (c *Dynamic) Validators() *types.ValidatorSet {
|
||||
return c.cert.vSet
|
||||
func (dc *DynamicCertifier) Validators() *types.ValidatorSet {
|
||||
return dc.cert.vSet
|
||||
}
|
||||
|
||||
// Hash returns the hash of this certifier.
|
||||
func (c *Dynamic) Hash() []byte {
|
||||
return c.cert.Hash()
|
||||
func (dc *DynamicCertifier) Hash() []byte {
|
||||
return dc.cert.Hash()
|
||||
}
|
||||
|
||||
// LastHeight returns the last height of this certifier.
|
||||
func (c *Dynamic) LastHeight() int64 {
|
||||
return c.lastHeight
|
||||
func (dc *DynamicCertifier) LastHeight() int64 {
|
||||
return dc.lastHeight
|
||||
}
|
||||
|
||||
// Certify will verify whether the commit is valid and will update the height if it is or return an
|
||||
// error if it is not.
|
||||
func (c *Dynamic) Certify(check Commit) error {
|
||||
err := c.cert.Certify(check)
|
||||
// Implements Certifier.
|
||||
func (dc *DynamicCertifier) Certify(check Commit) error {
|
||||
err := dc.cert.Certify(check)
|
||||
if err == nil {
|
||||
// update last seen height if input is valid
|
||||
c.lastHeight = check.Height()
|
||||
dc.lastHeight = check.Height()
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -65,15 +67,15 @@ func (c *Dynamic) Certify(check Commit) error {
|
||||
// the certifying validator set if safe to do so.
|
||||
//
|
||||
// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr)
|
||||
func (c *Dynamic) Update(fc FullCommit) error {
|
||||
func (dc *DynamicCertifier) Update(fc FullCommit) error {
|
||||
// ignore all checkpoints in the past -> only to the future
|
||||
h := fc.Height()
|
||||
if h <= c.lastHeight {
|
||||
if h <= dc.lastHeight {
|
||||
return liteErr.ErrPastTime()
|
||||
}
|
||||
|
||||
// first, verify if the input is self-consistent....
|
||||
err := fc.ValidateBasic(c.ChainID())
|
||||
err := fc.ValidateBasic(dc.ChainID())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -82,14 +84,13 @@ func (c *Dynamic) Update(fc FullCommit) error {
|
||||
// would be approved by the currently known validator set
|
||||
// as well as the new set
|
||||
commit := fc.Commit.Commit
|
||||
err = c.Validators().VerifyCommitAny(fc.Validators, c.ChainID(),
|
||||
commit.BlockID, h, commit)
|
||||
err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit)
|
||||
if err != nil {
|
||||
return liteErr.ErrTooMuchChange()
|
||||
}
|
||||
|
||||
// looks good, we can update
|
||||
c.cert = NewStatic(c.ChainID(), fc.Validators)
|
||||
c.lastHeight = h
|
||||
dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators)
|
||||
dc.lastHeight = h
|
||||
return nil
|
||||
}
|
@ -23,7 +23,7 @@ func TestDynamicCert(t *testing.T) {
|
||||
vals := keys.ToValidators(20, 10)
|
||||
// and a certifier based on our known set
|
||||
chainID := "test-dyno"
|
||||
cert := lite.NewDynamic(chainID, vals, 0)
|
||||
cert := lite.NewDynamicCertifier(chainID, vals, 0)
|
||||
|
||||
cases := []struct {
|
||||
keys lite.ValKeys
|
||||
@ -67,7 +67,7 @@ func TestDynamicUpdate(t *testing.T) {
|
||||
chainID := "test-dyno-up"
|
||||
keys := lite.GenValKeys(5)
|
||||
vals := keys.ToValidators(20, 0)
|
||||
cert := lite.NewDynamic(chainID, vals, 40)
|
||||
cert := lite.NewDynamicCertifier(chainID, vals, 40)
|
||||
|
||||
// one valid block to give us a sense of time
|
||||
h := int64(100)
|
lite/inquirer.go
@ -1,155 +0,0 @@
|
||||
package lite
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
||||
liteErr "github.com/tendermint/tendermint/lite/errors"
|
||||
)
|
||||
|
||||
// Inquiring wraps a dynamic certifier and implements an auto-update strategy. If a call to Certify
|
||||
// fails due to a change it validator set, Inquiring will try and find a previous FullCommit which
|
||||
// it can use to safely update the validator set. It uses a source provider to obtain the needed
|
||||
// FullCommits. It stores properly validated data on the local system.
|
||||
type Inquiring struct {
|
||||
cert *Dynamic
|
||||
// These are only properly validated data, from local system
|
||||
trusted Provider
|
||||
// This is a source of new info, like a node rpc, or other import method
|
||||
Source Provider
|
||||
}
|
||||
|
||||
// NewInquiring returns a new Inquiring object. It uses the trusted provider to store validated
|
||||
// data and the source provider to obtain missing FullCommits.
|
||||
//
|
||||
// Example: The trusted provider should a CacheProvider, MemProvider or files.Provider. The source
|
||||
// provider should be a client.HTTPProvider.
|
||||
func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provider) *Inquiring {
|
||||
// store the data in trusted
|
||||
// TODO: StoredCommit() can return an error and we need to handle this.
|
||||
trusted.StoreCommit(fc)
|
||||
|
||||
return &Inquiring{
|
||||
cert: NewDynamic(chainID, fc.Validators, fc.Height()),
|
||||
trusted: trusted,
|
||||
Source: source,
|
||||
}
|
||||
}
|
||||
|
||||
// ChainID returns the chain id.
|
||||
func (c *Inquiring) ChainID() string {
|
||||
return c.cert.ChainID()
|
||||
}
|
||||
|
||||
// Validators returns the validator set.
|
||||
func (c *Inquiring) Validators() *types.ValidatorSet {
|
||||
return c.cert.cert.vSet
|
||||
}
|
||||
|
||||
// LastHeight returns the last height.
|
||||
func (c *Inquiring) LastHeight() int64 {
|
||||
return c.cert.lastHeight
|
||||
}
|
||||
|
||||
// Certify makes sure this is checkpoint is valid.
|
||||
//
|
||||
// If the validators have changed since the last know time, it looks
|
||||
// for a path to prove the new validators.
|
||||
//
|
||||
// On success, it will store the checkpoint in the store for later viewing
|
||||
func (c *Inquiring) Certify(commit Commit) error {
|
||||
err := c.useClosestTrust(commit.Height())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.cert.Certify(commit)
|
||||
if !liteErr.IsValidatorsChangedErr(err) {
|
||||
return err
|
||||
}
|
||||
err = c.updateToHash(commit.Header.ValidatorsHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.cert.Certify(commit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// store the new checkpoint
|
||||
return c.trusted.StoreCommit(NewFullCommit(commit, c.Validators()))
|
||||
}
|
||||
|
||||
// Update will verify if this is a valid change and update
|
||||
// the certifying validator set if safe to do so.
|
||||
func (c *Inquiring) Update(fc FullCommit) error {
|
||||
err := c.useClosestTrust(fc.Height())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = c.cert.Update(fc)
|
||||
if err == nil {
|
||||
err = c.trusted.StoreCommit(fc)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Inquiring) useClosestTrust(h int64) error {
|
||||
closest, err := c.trusted.GetByHeight(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if the best seed is not the one we currently use,
|
||||
// let's just reset the dynamic validator
|
||||
if closest.Height() != c.LastHeight() {
|
||||
c.cert = NewDynamic(c.ChainID(), closest.Validators, closest.Height())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateToHash gets the validator hash we want to update to
|
||||
// if IsTooMuchChangeErr, we try to find a path by binary search over height
|
||||
func (c *Inquiring) updateToHash(vhash []byte) error {
|
||||
// try to get the match, and update
|
||||
fc, err := c.Source.GetByHash(vhash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.cert.Update(fc)
|
||||
// handle IsTooMuchChangeErr by using divide and conquer
|
||||
if liteErr.IsTooMuchChangeErr(err) {
|
||||
err = c.updateToHeight(fc.Height())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// updateToHeight will use divide-and-conquer to find a path to h
|
||||
func (c *Inquiring) updateToHeight(h int64) error {
|
||||
// try to update to this height (with checks)
|
||||
fc, err := c.Source.GetByHeight(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
start, end := c.LastHeight(), fc.Height()
|
||||
if end <= start {
|
||||
return liteErr.ErrNoPathFound()
|
||||
}
|
||||
err = c.Update(fc)
|
||||
|
||||
// we can handle IsTooMuchChangeErr specially
|
||||
if !liteErr.IsTooMuchChangeErr(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
// try to update to mid
|
||||
mid := (start + end) / 2
|
||||
err = c.updateToHeight(mid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if we made it to mid, we recurse
|
||||
return c.updateToHeight(h)
|
||||
}
|
lite/inquiring_certifier.go (new file)
@ -0,0 +1,163 @@
|
||||
package lite
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
||||
liteErr "github.com/tendermint/tendermint/lite/errors"
|
||||
)
|
||||
|
||||
var _ Certifier = (*InquiringCertifier)(nil)
|
||||
|
||||
// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call
|
||||
// to Certify fails due to a change in its validator set, InquiringCertifier will try and find a
|
||||
// previous FullCommit which it can use to safely update the validator set. It uses a source
|
||||
// provider to obtain the needed FullCommits. It stores properly validated data on the local system.
|
||||
type InquiringCertifier struct {
|
||||
cert *DynamicCertifier
|
||||
// These are only properly validated data, from local system
|
||||
trusted Provider
|
||||
// This is a source of new info, like a node rpc, or other import method
|
||||
Source Provider
|
||||
}
|
||||
|
||||
// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store
|
||||
// validated data and the source provider to obtain missing FullCommits.
|
||||
//
|
||||
// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The source
|
||||
// provider should be a client.HTTPProvider.
|
||||
func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider,
|
||||
source Provider) (*InquiringCertifier, error) {
|
||||
|
||||
// store the data in trusted
|
||||
err := trusted.StoreCommit(fc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &InquiringCertifier{
|
||||
cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()),
|
||||
trusted: trusted,
|
||||
Source: source,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ChainID returns the chain id.
|
||||
// Implements Certifier.
|
||||
func (ic *InquiringCertifier) ChainID() string {
|
||||
return ic.cert.ChainID()
|
||||
}
|
||||
|
||||
// Validators returns the validator set.
|
||||
func (ic *InquiringCertifier) Validators() *types.ValidatorSet {
|
||||
return ic.cert.cert.vSet
|
||||
}
|
||||
|
||||
// LastHeight returns the last height.
|
||||
func (ic *InquiringCertifier) LastHeight() int64 {
|
||||
return ic.cert.lastHeight
|
||||
}
|
||||
|
||||
// Certify makes sure this checkpoint is valid.
|
||||
//
|
||||
// If the validators have changed since the last known time, it looks
|
||||
// for a path to prove the new validators.
|
||||
//
|
||||
// On success, it will store the checkpoint in the store for later viewing
|
||||
// Implements Certifier.
|
||||
func (ic *InquiringCertifier) Certify(commit Commit) error {
|
||||
err := ic.useClosestTrust(commit.Height())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ic.cert.Certify(commit)
|
||||
if !liteErr.IsValidatorsChangedErr(err) {
|
||||
return err
|
||||
}
|
||||
err = ic.updateToHash(commit.Header.ValidatorsHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ic.cert.Certify(commit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// store the new checkpoint
|
||||
return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators()))
|
||||
}
|
||||
|
||||
// Update will verify if this is a valid change and update
|
||||
// the certifying validator set if safe to do so.
|
||||
func (ic *InquiringCertifier) Update(fc FullCommit) error {
|
||||
err := ic.useClosestTrust(fc.Height())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ic.cert.Update(fc)
|
||||
if err == nil {
|
||||
err = ic.trusted.StoreCommit(fc)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ic *InquiringCertifier) useClosestTrust(h int64) error {
|
||||
closest, err := ic.trusted.GetByHeight(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if the best seed is not the one we currently use,
|
||||
// let's just reset the dynamic validator
|
||||
if closest.Height() != ic.LastHeight() {
|
||||
ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateToHash gets the validator hash we want to update to
|
||||
// if IsTooMuchChangeErr, we try to find a path by binary search over height
|
||||
func (ic *InquiringCertifier) updateToHash(vhash []byte) error {
|
||||
// try to get the match, and update
|
||||
fc, err := ic.Source.GetByHash(vhash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ic.cert.Update(fc)
|
||||
// handle IsTooMuchChangeErr by using divide and conquer
|
||||
if liteErr.IsTooMuchChangeErr(err) {
|
||||
err = ic.updateToHeight(fc.Height())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// updateToHeight will use divide-and-conquer to find a path to h
|
||||
func (ic *InquiringCertifier) updateToHeight(h int64) error {
|
||||
// try to update to this height (with checks)
|
||||
fc, err := ic.Source.GetByHeight(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
start, end := ic.LastHeight(), fc.Height()
|
||||
if end <= start {
|
||||
return liteErr.ErrNoPathFound()
|
||||
}
|
||||
err = ic.Update(fc)
|
||||
|
||||
// we can handle IsTooMuchChangeErr specially
|
||||
if !liteErr.IsTooMuchChangeErr(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
// try to update to mid
|
||||
mid := (start + end) / 2
|
||||
err = ic.updateToHeight(mid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if we made it to mid, we recurse
|
||||
return ic.updateToHeight(h)
|
||||
}
|
@ -32,18 +32,20 @@ func TestInquirerValidPath(t *testing.T) {
|
||||
vals := keys.ToValidators(vote, 0)
|
||||
h := int64(20 + 10*i)
|
||||
appHash := []byte(fmt.Sprintf("h=%d", h))
|
||||
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, len(keys))
|
||||
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
|
||||
len(keys))
|
||||
}
|
||||
|
||||
// initialize a certifier with the initial state
|
||||
cert := lite.NewInquiring(chainID, commits[0], trust, source)
|
||||
cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
|
||||
require.Nil(err)
|
||||
|
||||
// this should fail validation....
|
||||
commit := commits[count-1].Commit
|
||||
err := cert.Certify(commit)
|
||||
err = cert.Certify(commit)
|
||||
require.NotNil(err)
|
||||
|
||||
// add a few seed in the middle should be insufficient
|
||||
// adding a few commits in the middle should be insufficient
|
||||
for i := 10; i < 13; i++ {
|
||||
err := source.StoreCommit(commits[i])
|
||||
require.Nil(err)
|
||||
@ -81,11 +83,12 @@ func TestInquirerMinimalPath(t *testing.T) {
|
||||
h := int64(5 + 10*i)
|
||||
appHash := []byte(fmt.Sprintf("h=%d", h))
|
||||
resHash := []byte(fmt.Sprintf("res=%d", h))
|
||||
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, len(keys))
|
||||
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
|
||||
len(keys))
|
||||
}
|
||||
|
||||
// initialize a certifier with the initial state
|
||||
cert := lite.NewInquiring(chainID, commits[0], trust, source)
|
||||
cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
|
||||
|
||||
// this should fail validation....
|
||||
commit := commits[count-1].Commit
|
||||
@ -130,11 +133,12 @@ func TestInquirerVerifyHistorical(t *testing.T) {
|
||||
h := int64(20 + 10*i)
|
||||
appHash := []byte(fmt.Sprintf("h=%d", h))
|
||||
resHash := []byte(fmt.Sprintf("res=%d", h))
|
||||
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, len(keys))
|
||||
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
|
||||
len(keys))
|
||||
}
|
||||
|
||||
// initialize a certifier with the initial state
|
||||
cert := lite.NewInquiring(chainID, commits[0], trust, source)
|
||||
cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
|
||||
|
||||
// store a few commits as trust
|
||||
for _, i := range []int{2, 5} {
|
@ -105,7 +105,7 @@ func BenchmarkCertifyCommitSec100(b *testing.B) {
|
||||
func benchmarkCertifyCommit(b *testing.B, keys lite.ValKeys) {
|
||||
chainID := "bench-certify"
|
||||
vals := keys.ToValidators(20, 10)
|
||||
cert := lite.NewStatic(chainID, vals)
|
||||
cert := lite.NewStaticCertifier(chainID, vals)
|
||||
check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys))
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := cert.Certify(check)
|
||||
|
@ -6,7 +6,7 @@ import (
|
||||
"github.com/tendermint/tendermint/lite/files"
|
||||
)
|
||||
|
||||
func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.Inquiring, error) {
|
||||
func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) {
|
||||
trust := lite.NewCacheProvider(
|
||||
lite.NewMemStoreProvider(),
|
||||
files.NewProvider(rootDir),
|
||||
@ -25,6 +25,11 @@ func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.Inquiring, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert := lite.NewInquiring(chainID, fc, trust, source)
|
||||
|
||||
cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
@ -18,7 +18,11 @@ const (
|
||||
// set up the rpc routes to proxy via the given client,
|
||||
// and start up an http/rpc server on the location given by bind (eg. :1234)
|
||||
func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error {
|
||||
c.Start()
|
||||
err := c.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r := RPCRoutes(c)
|
||||
|
||||
// build the handler...
|
||||
@ -30,7 +34,7 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error
|
||||
core.SetLogger(logger)
|
||||
mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)
|
||||
|
||||
_, err := rpc.StartHTTPServer(listenAddr, mux, logger)
|
||||
_, err = rpc.StartHTTPServer(listenAddr, mux, logger)
|
||||
|
||||
return err
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption
|
||||
|
||||
// make sure the proof is the proper height
|
||||
if resp.IsErr() {
|
||||
err = errors.Errorf("Query error %d: %d", resp.Code)
|
||||
err = errors.Errorf("Query error for key %X: %d", key, resp.Code)
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(resp.Key) == 0 || len(resp.Proof) == 0 {
|
||||
@ -79,7 +79,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "Couldn't verify proof")
|
||||
}
|
||||
return &ctypes.ResultABCIQuery{resp}, eproof, nil
|
||||
return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil
|
||||
}
|
||||
|
||||
// The key wasn't found, construct a proof of non-existence.
|
||||
@ -93,13 +93,12 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "Couldn't verify proof")
|
||||
}
|
||||
return &ctypes.ResultABCIQuery{resp}, aproof, ErrNoData()
|
||||
return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData()
|
||||
}
|
||||
|
||||
// GetCertifiedCommit gets the signed header for a given height
|
||||
// and certifies it. Returns error if unable to get a proven header.
|
||||
func GetCertifiedCommit(h int64, node rpcclient.Client,
|
||||
cert lite.Certifier) (empty lite.Commit, err error) {
|
||||
func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) {
|
||||
|
||||
// FIXME: cannot use cert.GetByHeight for now, as it also requires
|
||||
// Validators and will fail on querying tendermint for non-current height.
|
||||
@ -107,14 +106,18 @@ func GetCertifiedCommit(h int64, node rpcclient.Client,
|
||||
rpcclient.WaitForHeight(node, h, nil)
|
||||
cresp, err := node.Commit(&h)
|
||||
if err != nil {
|
||||
return
|
||||
return lite.Commit{}, err
|
||||
}
|
||||
commit := client.CommitFromResult(cresp)
|
||||
|
||||
commit := client.CommitFromResult(cresp)
|
||||
// validate downloaded checkpoint with our request and trust store.
|
||||
if commit.Height() != h {
|
||||
return empty, certerr.ErrHeightMismatch(h, commit.Height())
|
||||
return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height())
|
||||
}
|
||||
err = cert.Certify(commit)
|
||||
|
||||
if err = cert.Certify(commit); err != nil {
|
||||
return lite.Commit{}, err
|
||||
}
|
||||
|
||||
return commit, nil
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ func _TestAppProofs(t *testing.T) {
|
||||
source := certclient.NewProvider(cl)
|
||||
seed, err := source.GetByHeight(brh - 2)
|
||||
require.NoError(err, "%+v", err)
|
||||
cert := lite.NewStatic("my-chain", seed.Validators)
|
||||
cert := lite.NewStaticCertifier("my-chain", seed.Validators)
|
||||
|
||||
client.WaitForHeight(cl, 3, nil)
|
||||
latest, err := source.LatestCommit()
|
||||
@ -117,7 +117,7 @@ func _TestTxProofs(t *testing.T) {
|
||||
source := certclient.NewProvider(cl)
|
||||
seed, err := source.GetByHeight(brh - 2)
|
||||
require.NoError(err, "%+v", err)
|
||||
cert := lite.NewStatic("my-chain", seed.Validators)
|
||||
cert := lite.NewStaticCertifier("my-chain", seed.Validators)
|
||||
|
||||
// First let's make sure a bogus transaction hash returns a valid non-existence proof.
|
||||
key := types.Tx([]byte("bogus")).Hash()
|
||||
@ -136,5 +136,4 @@ func _TestTxProofs(t *testing.T) {
|
||||
commit, err := GetCertifiedCommit(br.Height, cl, cert)
|
||||
require.Nil(err, "%+v", err)
|
||||
require.Equal(res.Proof.RootHash, commit.Header.DataHash)
|
||||
|
||||
}
|
||||
|
@ -15,14 +15,14 @@ var _ rpcclient.Client = Wrapper{}
|
||||
// provable before passing it along. Allows you to make any rpcclient fully secure.
|
||||
type Wrapper struct {
|
||||
rpcclient.Client
|
||||
cert *lite.Inquiring
|
||||
cert *lite.InquiringCertifier
|
||||
}
|
||||
|
||||
// SecureClient uses a given certifier to wrap an connection to an untrusted
|
||||
// host and return a cryptographically secure rpc client.
|
||||
//
|
||||
// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface
|
||||
func SecureClient(c rpcclient.Client, cert *lite.Inquiring) Wrapper {
|
||||
func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper {
|
||||
wrap := Wrapper{c, cert}
|
||||
// TODO: no longer possible as no more such interface exposed....
|
||||
// if we wrap http client, then we can swap out the event switch to filter
|
||||
@ -34,7 +34,9 @@ func SecureClient(c rpcclient.Client, cert *lite.Inquiring) Wrapper {
|
||||
}
|
||||
|
||||
// ABCIQueryWithOptions exposes all options for the ABCI query and verifies the returned proof
|
||||
func (w Wrapper) ABCIQueryWithOptions(path string, data data.Bytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
|
||||
func (w Wrapper) ABCIQueryWithOptions(path string, data data.Bytes,
|
||||
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
|
||||
|
||||
res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert)
|
||||
return res, err
|
||||
}
|
||||
|
@ -10,62 +10,64 @@ import (
|
||||
liteErr "github.com/tendermint/tendermint/lite/errors"
|
||||
)
|
||||
|
||||
var _ Certifier = &Static{}
|
||||
var _ Certifier = (*StaticCertifier)(nil)
|
||||
|
||||
// Static assumes a static set of validators, set on
|
||||
// StaticCertifier assumes a static set of validators, set on
|
||||
// initialization and checks against them.
|
||||
// The signatures on every header are checked for > 2/3 votes
|
||||
// against the known validator set upon Certify
|
||||
//
|
||||
// Good for testing or really simple chains. Building block
|
||||
// to support real-world functionality.
|
||||
type Static struct {
|
||||
type StaticCertifier struct {
|
||||
chainID string
|
||||
vSet *types.ValidatorSet
|
||||
vhash []byte
|
||||
}
|
||||
|
||||
// NewStatic returns a new certifier with a static validator set.
|
||||
func NewStatic(chainID string, vals *types.ValidatorSet) *Static {
|
||||
return &Static{
|
||||
// NewStaticCertifier returns a new certifier with a static validator set.
|
||||
func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier {
|
||||
return &StaticCertifier{
|
||||
chainID: chainID,
|
||||
vSet: vals,
|
||||
}
|
||||
}
|
||||
|
||||
// ChainID returns the chain id.
|
||||
func (c *Static) ChainID() string {
|
||||
return c.chainID
|
||||
// Implements Certifier.
|
||||
func (sc *StaticCertifier) ChainID() string {
|
||||
return sc.chainID
|
||||
}
|
||||
|
||||
// Validators returns the validator set.
|
||||
func (c *Static) Validators() *types.ValidatorSet {
|
||||
return c.vSet
|
||||
func (sc *StaticCertifier) Validators() *types.ValidatorSet {
|
||||
return sc.vSet
|
||||
}
|
||||
|
||||
// Hash returns the hash of the validator set.
|
||||
func (c *Static) Hash() []byte {
|
||||
if len(c.vhash) == 0 {
|
||||
c.vhash = c.vSet.Hash()
|
||||
func (sc *StaticCertifier) Hash() []byte {
|
||||
if len(sc.vhash) == 0 {
|
||||
sc.vhash = sc.vSet.Hash()
|
||||
}
|
||||
return c.vhash
|
||||
return sc.vhash
|
||||
}
|
||||
|
||||
// Certify makes sure that the commit is valid.
|
||||
func (c *Static) Certify(commit Commit) error {
|
||||
// Implements Certifier.
|
||||
func (sc *StaticCertifier) Certify(commit Commit) error {
|
||||
// do basic sanity checks
|
||||
err := commit.ValidateBasic(c.chainID)
|
||||
err := commit.ValidateBasic(sc.chainID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// make sure it has the same validator set we have (static means static)
|
||||
if !bytes.Equal(c.Hash(), commit.Header.ValidatorsHash) {
|
||||
if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) {
|
||||
return liteErr.ErrValidatorsChanged()
|
||||
}
|
||||
|
||||
// then make sure we have the proper signatures for this
|
||||
err = c.vSet.VerifyCommit(c.chainID, commit.Commit.BlockID,
|
||||
err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID,
|
||||
commit.Header.Height, commit.Commit)
|
||||
return errors.WithStack(err)
|
||||
}
|
@ -21,7 +21,7 @@ func TestStaticCert(t *testing.T) {
|
||||
vals := keys.ToValidators(20, 10)
|
||||
// and a certifier based on our known set
|
||||
chainID := "test-static"
|
||||
cert := lite.NewStatic(chainID, vals)
|
||||
cert := lite.NewStaticCertifier(chainID, vals)
|
||||
|
||||
cases := []struct {
|
||||
keys lite.ValKeys
|
node/node.go
@ -181,9 +181,9 @@ func NewNode(config *cfg.Config,
|
||||
|
||||
// Log whether this node is a validator or an observer
|
||||
if state.Validators.HasAddress(privValidator.GetAddress()) {
|
||||
consensusLogger.Info("This node is a validator")
|
||||
consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
|
||||
} else {
|
||||
consensusLogger.Info("This node is not a validator")
|
||||
consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
|
||||
}
|
||||
|
||||
// Make MempoolReactor
|
||||
@ -251,7 +251,8 @@ func NewNode(config *cfg.Config,
|
||||
trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig())
|
||||
trustMetricStore.SetLogger(p2pLogger)
|
||||
|
||||
pexReactor := p2p.NewPEXReactor(addrBook)
|
||||
pexReactor := p2p.NewPEXReactor(addrBook,
|
||||
&p2p.PEXReactorConfig{Seeds: strings.Split(config.P2P.Seeds, ",")})
|
||||
pexReactor.SetLogger(p2pLogger)
|
||||
sw.AddReactor("PEX", pexReactor)
|
||||
}
|
||||
@ -386,11 +387,10 @@ func (n *Node) OnStart() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// If seeds exist, add them to the address book and dial out
|
||||
if n.config.P2P.Seeds != "" {
|
||||
// dial out
|
||||
seeds := strings.Split(n.config.P2P.Seeds, ",")
|
||||
if err := n.DialSeeds(seeds); err != nil {
|
||||
// Always connect to persistent peers
|
||||
if n.config.P2P.PersistentPeers != "" {
|
||||
err = n.sw.DialPeersAsync(n.addrBook, strings.Split(n.config.P2P.PersistentPeers, ","), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -582,11 +582,6 @@ func (n *Node) NodeInfo() *p2p.NodeInfo {
|
||||
return n.sw.NodeInfo()
|
||||
}
|
||||
|
||||
// DialSeeds dials the given seeds on the Switch.
|
||||
func (n *Node) DialSeeds(seeds []string) error {
|
||||
return n.sw.DialSeeds(n.addrBook, seeds)
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
var (
|
||||
|
@ -45,6 +45,7 @@ type PEXReactor struct {
|
||||
BaseReactor
|
||||
|
||||
book *AddrBook
|
||||
config *PEXReactorConfig
|
||||
ensurePeersPeriod time.Duration
|
||||
|
||||
// tracks message count by peer, so we can prevent abuse
|
||||
@ -52,10 +53,18 @@ type PEXReactor struct {
|
||||
maxMsgCountByPeer uint16
|
||||
}
|
||||
|
||||
// PEXReactorConfig holds reactor specific configuration data.
|
||||
type PEXReactorConfig struct {
|
||||
// Seeds is a list of addresses reactor may use if it can't connect to peers
|
||||
// in the addrbook.
|
||||
Seeds []string
|
||||
}
|
||||
|
||||
// NewPEXReactor creates new PEX reactor.
|
||||
func NewPEXReactor(b *AddrBook) *PEXReactor {
|
||||
func NewPEXReactor(b *AddrBook, config *PEXReactorConfig) *PEXReactor {
|
||||
r := &PEXReactor{
|
||||
book: b,
|
||||
config: config,
|
||||
ensurePeersPeriod: defaultEnsurePeersPeriod,
|
||||
msgCountByPeer: cmn.NewCMap(),
|
||||
maxMsgCountByPeer: defaultMaxMsgCountByPeer,
|
||||
@ -100,7 +109,7 @@ func (r *PEXReactor) GetChannels() []*ChannelDescriptor {
|
||||
func (r *PEXReactor) AddPeer(p Peer) {
|
||||
if p.IsOutbound() {
|
||||
// For outbound peers, the address is already in the books.
|
||||
// Either it was added in DialSeeds or when we
|
||||
// Either it was added in DialPersistentPeers or when we
|
||||
// received the peer's address in r.Receive
|
||||
if r.book.NeedMoreAddrs() {
|
||||
r.RequestPEX(p)
|
||||
@ -239,7 +248,7 @@ func (r *PEXReactor) ensurePeersRoutine() {
|
||||
// placeholder. It should not be the case that an address becomes old/vetted
|
||||
// upon a single successful connection.
|
||||
func (r *PEXReactor) ensurePeers() {
|
||||
numOutPeers, _, numDialing := r.Switch.NumPeers()
|
||||
numOutPeers, numInPeers, numDialing := r.Switch.NumPeers()
|
||||
numToDial := minNumOutboundPeers - (numOutPeers + numDialing)
|
||||
r.Logger.Info("Ensure peers", "numOutPeers", numOutPeers, "numDialing", numDialing, "numToDial", numToDial)
|
||||
if numToDial <= 0 {
|
||||
@ -286,13 +295,20 @@ func (r *PEXReactor) ensurePeers() {
|
||||
|
||||
// If we need more addresses, pick a random peer and ask for more.
|
||||
if r.book.NeedMoreAddrs() {
|
||||
if peers := r.Switch.Peers().List(); len(peers) > 0 {
|
||||
i := rand.Int() % len(peers) // nolint: gas
|
||||
peer := peers[i]
|
||||
r.Logger.Info("No addresses to dial. Sending pexRequest to random peer", "peer", peer)
|
||||
peers := r.Switch.Peers().List()
|
||||
peersCount := len(peers)
|
||||
if peersCount > 0 {
|
||||
peer := peers[rand.Int()%peersCount] // nolint: gas
|
||||
r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
|
||||
r.RequestPEX(peer)
|
||||
}
|
||||
}
|
||||
|
||||
// If we are not connected to nor dialing anybody, fallback to dialing seeds.
|
||||
if numOutPeers+numInPeers+numDialing+len(toDial) == 0 {
|
||||
r.Logger.Info("No addresses to dial nor connected peers. Will dial seeds", "seeds", r.config.Seeds)
|
||||
r.Switch.DialPeersAsync(r.book, r.config.Seeds, false)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *PEXReactor) flushMsgCountByPeer() {
|
||||
|
@ -25,7 +25,7 @@ func TestPEXReactorBasic(t *testing.T) {
|
||||
book := NewAddrBook(dir+"addrbook.json", true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book)
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
|
||||
assert.NotNil(r)
|
||||
@ -41,7 +41,7 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
|
||||
book := NewAddrBook(dir+"addrbook.json", true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book)
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
|
||||
size := book.Size()
|
||||
@ -77,7 +77,7 @@ func TestPEXReactorRunning(t *testing.T) {
|
||||
switches[i] = makeSwitch(config, i, "127.0.0.1", "123.123.123", func(i int, sw *Switch) *Switch {
|
||||
sw.SetLogger(log.TestingLogger().With("switch", i))
|
||||
|
||||
r := NewPEXReactor(book)
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
r.SetEnsurePeersPeriod(250 * time.Millisecond)
|
||||
sw.AddReactor("pex", r)
|
||||
@ -108,6 +108,7 @@ func TestPEXReactorRunning(t *testing.T) {
|
||||
|
||||
func assertSomePeersWithTimeout(t *testing.T, switches []*Switch, checkPeriod, timeout time.Duration) {
|
||||
ticker := time.NewTicker(checkPeriod)
|
||||
remaining := timeout
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
@ -119,16 +120,21 @@ func assertSomePeersWithTimeout(t *testing.T, switches []*Switch, checkPeriod, t
|
||||
allGood = false
|
||||
}
|
||||
}
|
||||
remaining -= checkPeriod
|
||||
if remaining < 0 {
|
||||
remaining = 0
|
||||
}
|
||||
if allGood {
|
||||
return
|
||||
}
|
||||
case <-time.After(timeout):
|
||||
case <-time.After(remaining):
|
||||
numPeersStr := ""
|
||||
for i, s := range switches {
|
||||
outbound, inbound, _ := s.NumPeers()
|
||||
numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound)
|
||||
}
|
||||
t.Errorf("expected all switches to be connected to at least one peer (switches: %s)", numPeersStr)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -142,7 +148,7 @@ func TestPEXReactorReceive(t *testing.T) {
|
||||
book := NewAddrBook(dir+"addrbook.json", false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book)
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
|
||||
peer := createRandomPeer(false)
|
||||
@ -167,7 +173,7 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
|
||||
book := NewAddrBook(dir+"addrbook.json", true)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book)
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
r.SetMaxMsgCountByPeer(5)
|
||||
|
||||
@ -181,6 +187,47 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
|
||||
assert.True(r.ReachedMaxMsgCountForPeer(peer.NodeInfo().ListenAddr))
|
||||
}
|
||||
|
||||
func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll(dir) // nolint: errcheck
|
||||
|
||||
book := NewAddrBook(dir+"addrbook.json", false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
// 1. create seed
|
||||
seed := makeSwitch(config, 0, "127.0.0.1", "123.123.123", func(i int, sw *Switch) *Switch {
|
||||
sw.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
r.SetEnsurePeersPeriod(250 * time.Millisecond)
|
||||
sw.AddReactor("pex", r)
|
||||
return sw
|
||||
})
|
||||
seed.AddListener(NewDefaultListener("tcp", seed.NodeInfo().ListenAddr, true, log.TestingLogger()))
|
||||
err = seed.Start()
|
||||
require.Nil(t, err)
|
||||
defer seed.Stop()
|
||||
|
||||
// 2. create usual peer
|
||||
sw := makeSwitch(config, 1, "127.0.0.1", "123.123.123", func(i int, sw *Switch) *Switch {
|
||||
sw.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book, &PEXReactorConfig{Seeds: []string{seed.NodeInfo().ListenAddr}})
|
||||
r.SetLogger(log.TestingLogger())
|
||||
r.SetEnsurePeersPeriod(250 * time.Millisecond)
|
||||
sw.AddReactor("pex", r)
|
||||
return sw
|
||||
})
|
||||
err = sw.Start()
|
||||
require.Nil(t, err)
|
||||
defer sw.Stop()
|
||||
|
||||
// 3. check that peer at least connects to seed
|
||||
assertSomePeersWithTimeout(t, []*Switch{sw}, 10*time.Millisecond, 10*time.Second)
|
||||
}
|
||||
|
||||
func createRoutableAddr() (addr string, netAddr *NetAddress) {
|
||||
for {
|
||||
addr = cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256)
|
||||
|
@ -17,7 +17,7 @@ import (
|
||||
|
||||
const (
|
||||
// wait a random amount of time from this interval
|
||||
// before dialing seeds or reconnecting to help prevent DoS
|
||||
// before dialing peers or reconnecting to help prevent DoS
|
||||
dialRandomizerIntervalMilliseconds = 3000
|
||||
|
||||
// repeatedly try to reconnect for a few minutes
|
||||
@ -322,16 +322,16 @@ func (sw *Switch) startInitPeer(peer *peer) {
|
||||
}
|
||||
}
|
||||
|
||||
// DialSeeds dials a list of seeds asynchronously in random order.
|
||||
func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error {
|
||||
netAddrs, errs := NewNetAddressStrings(seeds)
|
||||
// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
|
||||
func (sw *Switch) DialPeersAsync(addrBook *AddrBook, peers []string, persistent bool) error {
|
||||
netAddrs, errs := NewNetAddressStrings(peers)
|
||||
// TODO: IDs
|
||||
for _, err := range errs {
|
||||
sw.Logger.Error("Error in seed's address", "err", err)
|
||||
sw.Logger.Error("Error in peer's address", "err", err)
|
||||
}
|
||||
|
||||
if addrBook != nil {
|
||||
// add seeds to `addrBook`
|
||||
// add peers to `addrBook`
|
||||
ourAddrS := sw.nodeInfo.ListenAddr
|
||||
ourAddr, _ := NewNetAddressString(ourAddrS)
|
||||
for _, netAddr := range netAddrs {
|
||||
@ -350,7 +350,12 @@ func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error {
|
||||
go func(i int) {
|
||||
sw.randomSleep(0)
|
||||
j := perm[i]
|
||||
sw.dialSeed(netAddrs[j])
|
||||
peer, err := sw.DialPeerWithAddress(netAddrs[j], persistent)
|
||||
if err != nil {
|
||||
sw.Logger.Error("Error dialing peer", "err", err)
|
||||
} else {
|
||||
sw.Logger.Info("Connected to peer", "peer", peer)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
return nil
|
||||
@ -362,15 +367,6 @@ func (sw *Switch) randomSleep(interval time.Duration) {
|
||||
time.Sleep(r + interval)
|
||||
}
|
||||
|
||||
func (sw *Switch) dialSeed(addr *NetAddress) {
|
||||
peer, err := sw.DialPeerWithAddress(addr, true)
|
||||
if err != nil {
|
||||
sw.Logger.Error("Error dialing seed", "err", err)
|
||||
} else {
|
||||
sw.Logger.Info("Connected to seed", "peer", peer)
|
||||
}
|
||||
}
|
||||
|
||||
// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects successfully.
|
||||
// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails.
|
||||
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (Peer, error) {
|
||||
|
@ -68,7 +68,9 @@ func TestTrustMetricStopPause(t *testing.T) {
|
||||
tt.NextTick()
|
||||
tm.Pause()
|
||||
|
||||
// could be 1 or 2 because Pause and NextTick race
|
||||
first := tm.Copy().numIntervals
|
||||
|
||||
// Allow more time to pass and check the intervals are unchanged
|
||||
tt.NextTick()
|
||||
tt.NextTick()
|
||||
|
@ -24,7 +24,7 @@ type TestTicker struct {
|
||||
|
||||
// NewTestTicker returns our ticker used within test routines
|
||||
func NewTestTicker() *TestTicker {
|
||||
c := make(chan time.Time, 1)
|
||||
c := make(chan time.Time)
|
||||
return &TestTicker{
|
||||
C: c,
|
||||
}
|
||||
|
@ -33,7 +33,8 @@ type ABCIClient interface {
|
||||
// reading from abci app
|
||||
ABCIInfo() (*ctypes.ResultABCIInfo, error)
|
||||
ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error)
|
||||
ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
|
||||
ABCIQueryWithOptions(path string, data data.Bytes,
|
||||
opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
|
||||
|
||||
// writing to abci app
|
||||
BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
|
||||
|
@ -88,6 +88,10 @@ func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
|
||||
return core.UnsafeDialSeeds(seeds)
|
||||
}
|
||||
|
||||
func (Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
|
||||
return core.UnsafeDialPeers(peers, persistent)
|
||||
}
|
||||
|
||||
func (Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
|
||||
return core.BlockchainInfo(minHeight, maxHeight)
|
||||
}
|
||||
|
@ -111,6 +111,10 @@ func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
|
||||
return core.UnsafeDialSeeds(seeds)
|
||||
}
|
||||
|
||||
func (c Client) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
|
||||
return core.UnsafeDialPeers(peers, persistent)
|
||||
}
|
||||
|
||||
func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
|
||||
return core.BlockchainInfo(minHeight, maxHeight)
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ Tendermint RPC is built using [our own RPC library](https://github.com/tendermin
|
||||
|
||||
## Configuration
|
||||
|
||||
Set the `laddr` config parameter under `[rpc]` table in the `$TMHOME/config.toml` file or the `--rpc.laddr` command-line flag to the desired protocol://host:port setting. Default: `tcp://0.0.0.0:46657`.
|
||||
Set the `laddr` config parameter under `[rpc]` table in the `$TMHOME/config/config.toml` file or the `--rpc.laddr` command-line flag to the desired protocol://host:port setting. Default: `tcp://0.0.0.0:46657`.
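For illustration, a minimal sketch of the two equivalent ways to set the RPC listen address described above (using the default value; adjust host and port to your setup):

```bash
# Option 1: in $TMHOME/config/config.toml, under the [rpc] table
#   [rpc]
#   laddr = "tcp://0.0.0.0:46657"

# Option 2: on the command line when starting the node
tendermint node --rpc.laddr tcp://0.0.0.0:46657
```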
|
||||
|
||||
## Arguments
|
||||
|
||||
@ -95,6 +95,7 @@ Endpoints that require arguments:
|
||||
/broadcast_tx_sync?tx=_
|
||||
/commit?height=_
|
||||
/dial_seeds?seeds=_
|
||||
/dial_persistent_peers?persistent_peers=_
|
||||
/subscribe?event=_
|
||||
/tx?hash=_&prove=_
|
||||
/unsafe_start_cpu_profiler?filename=_
|
||||
|
@ -1,8 +1,7 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
)
|
||||
|
||||
@ -55,19 +54,31 @@ func NetInfo() (*ctypes.ResultNetInfo, error) {
|
||||
}
|
||||
|
||||
func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
|
||||
|
||||
if len(seeds) == 0 {
|
||||
return &ctypes.ResultDialSeeds{}, fmt.Errorf("No seeds provided")
|
||||
return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided")
|
||||
}
|
||||
// starts go routines to dial each seed after random delays
|
||||
// starts go routines to dial each peer after random delays
|
||||
logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds)
|
||||
err := p2pSwitch.DialSeeds(addrBook, seeds)
|
||||
err := p2pSwitch.DialPeersAsync(addrBook, seeds, false)
|
||||
if err != nil {
|
||||
return &ctypes.ResultDialSeeds{}, err
|
||||
}
|
||||
return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil
|
||||
}
|
||||
|
||||
func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
|
||||
if len(peers) == 0 {
|
||||
return &ctypes.ResultDialPeers{}, errors.New("No peers provided")
|
||||
}
|
||||
// starts go routines to dial each peer after random delays
|
||||
logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent)
|
||||
err := p2pSwitch.DialPeersAsync(addrBook, peers, persistent)
|
||||
if err != nil {
|
||||
return &ctypes.ResultDialPeers{}, err
|
||||
}
|
||||
return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil
|
||||
}
|
||||
|
||||
// Get genesis file.
|
||||
//
|
||||
// ```shell
|
||||
|
@ -32,7 +32,7 @@ type P2P interface {
|
||||
NumPeers() (outbound, inbound, dialing int)
|
||||
NodeInfo() *p2p.NodeInfo
|
||||
IsListening() bool
|
||||
DialSeeds(*p2p.AddrBook, []string) error
|
||||
DialPeersAsync(*p2p.AddrBook, []string, bool) error
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
|
@ -39,6 +39,7 @@ var Routes = map[string]*rpc.RPCFunc{
|
||||
func AddUnsafeRoutes() {
|
||||
// control API
|
||||
Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds")
|
||||
Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent")
|
||||
Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "")
|
||||
|
||||
// profiler API
|
||||
|
@ -86,6 +86,10 @@ type ResultDialSeeds struct {
|
||||
Log string `json:"log"`
|
||||
}
|
||||
|
||||
type ResultDialPeers struct {
|
||||
Log string `json:"log"`
|
||||
}
|
||||
|
||||
type Peer struct {
|
||||
p2p.NodeInfo `json:"node_info"`
|
||||
IsOutbound bool `json:"is_outbound"`
|
||||
|
@ -3,7 +3,7 @@ set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; killall tendermint; killall logjack"
|
||||
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint unsafe_reset_priv_validator; rm -rf ~/.tendermint/data; rm ~/.tendermint/genesis.json; rm ~/.tendermint/logs/*"
|
||||
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint unsafe_reset_priv_validator; rm -rf ~/.tendermint/data; rm ~/.tendermint/config/genesis.json; rm ~/.tendermint/logs/*"
|
||||
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; git pull origin develop; make"
|
||||
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; mkdir -p ~/.tendermint/logs"
|
||||
debora run --bg --label tendermint -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint node 2>&1 | stdinwriter -outpath ~/.tendermint/logs/tendermint.log"
|
||||
|
@ -18,7 +18,8 @@ XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
|
||||
XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
|
||||
|
||||
# Make sure build tools are available.
|
||||
make tools
|
||||
# TODO: Tools should be "vendored" too.
|
||||
make get_tools
|
||||
|
||||
# Get VENDORED dependencies
|
||||
make get_vendor_deps
|
||||
|
@ -38,7 +38,7 @@ for i in $(seq 1 4); do
|
||||
--name local_testnet_$i \
|
||||
--entrypoint tendermint \
|
||||
-e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \
|
||||
tendermint_tester node --p2p.seeds 172.57.0.101:46656,172.57.0.102:46656,172.57.0.103:46656,172.57.0.104:46656 --proxy_app=dummy
|
||||
tendermint_tester node --p2p.persistent_peers 172.57.0.101:46656,172.57.0.102:46656,172.57.0.103:46656,172.57.0.104:46656 --proxy_app=dummy
|
||||
done
|
||||
```
|
||||
|
||||
|
@ -23,11 +23,11 @@ docker rm -vf local_testnet_$ID
|
||||
set -e
|
||||
|
||||
# restart peer - should have an empty blockchain
|
||||
SEEDS="$(test/p2p/ip.sh 1):46656"
|
||||
PERSISTENT_PEERS="$(test/p2p/ip.sh 1):46656"
|
||||
for j in `seq 2 $N`; do
|
||||
SEEDS="$SEEDS,$(test/p2p/ip.sh $j):46656"
|
||||
PERSISTENT_PEERS="$PERSISTENT_PEERS,$(test/p2p/ip.sh $j):46656"
|
||||
done
|
||||
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP "--p2p.seeds $SEEDS --p2p.pex --rpc.unsafe"
|
||||
bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP "--p2p.persistent_peers $PERSISTENT_PEERS --p2p.pex --rpc.unsafe"
|
||||
|
||||
# wait for peer to sync and check the app hash
|
||||
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID"
|
||||
|
@ -7,10 +7,10 @@ N=$3
|
||||
APP_PROXY=$4
|
||||
|
||||
set +u
|
||||
SEEDS=$5
|
||||
if [[ "$SEEDS" != "" ]]; then
|
||||
echo "Seeds: $SEEDS"
|
||||
SEEDS="--p2p.seeds $SEEDS"
|
||||
PERSISTENT_PEERS=$5
|
||||
if [[ "$PERSISTENT_PEERS" != "" ]]; then
|
||||
echo "PersistentPeers: $PERSISTENT_PEERS"
|
||||
PERSISTENT_PEERS="--p2p.persistent_peers $PERSISTENT_PEERS"
|
||||
fi
|
||||
set -u
|
||||
|
||||
@ -20,5 +20,5 @@ cd "$GOPATH/src/github.com/tendermint/tendermint"
|
||||
docker network create --driver bridge --subnet 172.57.0.0/16 "$NETWORK_NAME"
|
||||
|
||||
for i in $(seq 1 "$N"); do
|
||||
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$i" "$APP_PROXY" "$SEEDS --p2p.pex --rpc.unsafe"
|
||||
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$i" "$APP_PROXY" "$PERSISTENT_PEERS --p2p.pex --rpc.unsafe"
|
||||
done
|
||||
|
12
test/p2p/persistent_peers.sh
Normal file
@ -0,0 +1,12 @@
|
||||
#! /bin/bash
|
||||
set -eu
|
||||
|
||||
N=$1
|
||||
|
||||
cd "$GOPATH/src/github.com/tendermint/tendermint"
|
||||
|
||||
persistent_peers="$(test/p2p/ip.sh 1):46656"
|
||||
for i in $(seq 2 $N); do
|
||||
persistent_peers="$persistent_peers,$(test/p2p/ip.sh $i):46656"
|
||||
done
|
||||
echo "$persistent_peers"
|
@ -19,13 +19,13 @@ for i in `seq 1 $N`; do
|
||||
done
|
||||
|
||||
set -e
|
||||
# seeds need quotes
|
||||
seeds="\"$(test/p2p/ip.sh 1):46656\""
|
||||
# persistent_peers need quotes
|
||||
persistent_peers="\"$(test/p2p/ip.sh 1):46656\""
|
||||
for i in `seq 2 $N`; do
|
||||
seeds="$seeds,\"$(test/p2p/ip.sh $i):46656\""
|
||||
persistent_peers="$persistent_peers,\"$(test/p2p/ip.sh $i):46656\""
|
||||
done
|
||||
echo $seeds
|
||||
echo $persistent_peers
|
||||
|
||||
echo $seeds
|
||||
echo $persistent_peers
|
||||
IP=$(test/p2p/ip.sh 1)
|
||||
curl --data-urlencode "seeds=[$seeds]" "$IP:46657/dial_seeds"
|
||||
curl --data-urlencode "persistent_peers=[$persistent_peers]" "$IP:46657/dial_persistent_peers"
|
@ -6,10 +6,10 @@ NETWORK_NAME=$2
|
||||
N=$3
|
||||
PROXY_APP=$4
|
||||
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
cd "$GOPATH/src/github.com/tendermint/tendermint"
|
||||
|
||||
echo "Test reconnecting from the address book"
|
||||
bash test/p2p/pex/test_addrbook.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
|
||||
bash test/p2p/pex/test_addrbook.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP"
|
||||
|
||||
echo "Test connecting via /dial_seeds"
|
||||
bash test/p2p/pex/test_dial_seeds.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP
|
||||
echo "Test connecting via /dial_persistent_peers"
|
||||
bash test/p2p/pex/test_dial_persistent_peers.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP"
|
||||
|
@ -9,7 +9,7 @@ PROXY_APP=$4
|
||||
ID=1
|
||||
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo "Testing pex creates the addrbook and uses it if seeds are not provided"
|
||||
echo "Testing pex creates the addrbook and uses it if persistent_peers are not provided"
|
||||
echo "(assuming peers are started with pex enabled)"
|
||||
|
||||
CLIENT_NAME="pex_addrbook_$ID"
|
||||
@ -22,7 +22,7 @@ set +e #CIRCLE
|
||||
docker rm -vf "local_testnet_$ID"
|
||||
set -e
|
||||
|
||||
# NOTE that we do not provide seeds
|
||||
# NOTE that we do not provide persistent_peers
|
||||
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe"
|
||||
docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/addrbook.json"
|
||||
echo "with the following addrbook:"
|
||||
@ -35,7 +35,7 @@ echo ""
|
||||
bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N"
|
||||
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo "Testing other peers connect to us if we have neither seeds nor the addrbook"
|
||||
echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook"
|
||||
echo "(assuming peers are started with pex enabled)"
|
||||
|
||||
CLIENT_NAME="pex_no_addrbook_$ID"
|
||||
@ -46,7 +46,7 @@ set +e #CIRCLE
|
||||
docker rm -vf "local_testnet_$ID"
|
||||
set -e
|
||||
|
||||
# NOTE that we do not provide seeds
|
||||
# NOTE that we do not provide persistent_peers
|
||||
bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe"
|
||||
|
||||
# if the client runs forever, it means other peers have removed us from their books (which should not happen)
|
||||
|
@ -11,7 +11,7 @@ ID=1
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo "Testing full network connection using one /dial_seeds call"
|
||||
echo "Testing full network connection using one /dial_persistent_peers call"
|
||||
echo "(assuming peers are started with pex enabled)"
|
||||
|
||||
# stop the existing testnet and remove local network
|
||||
@ -21,16 +21,16 @@ set -e
|
||||
|
||||
# start the testnet on a local network
|
||||
# NOTE we re-use the same network for all tests
|
||||
SEEDS=""
|
||||
bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP $SEEDS
|
||||
PERSISTENT_PEERS=""
|
||||
bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP $PERSISTENT_PEERS
|
||||
|
||||
|
||||
|
||||
# dial seeds from one node
|
||||
CLIENT_NAME="dial_seeds"
|
||||
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/pex/dial_seeds.sh $N"
|
||||
# dial persistent_peers from one node
|
||||
CLIENT_NAME="dial_persistent_peers"
|
||||
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/pex/dial_persistent_peers.sh $N"
|
||||
|
||||
# test basic connectivity and consensus
|
||||
# start client container and check the num peers and height for all nodes
|
||||
CLIENT_NAME="dial_seeds_basic"
|
||||
CLIENT_NAME="dial_persistent_peers_basic"
|
||||
bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/basic/test.sh $N"
|
@ -1,12 +0,0 @@
|
||||
#! /bin/bash
|
||||
set -eu
|
||||
|
||||
N=$1
|
||||
|
||||
cd "$GOPATH/src/github.com/tendermint/tendermint"
|
||||
|
||||
seeds="$(test/p2p/ip.sh 1):46656"
|
||||
for i in $(seq 2 $N); do
|
||||
seeds="$seeds,$(test/p2p/ip.sh $i):46656"
|
||||
done
|
||||
echo "$seeds"
|
@ -13,11 +13,11 @@ set +e
|
||||
bash test/p2p/local_testnet_stop.sh "$NETWORK_NAME" "$N"
|
||||
set -e
|
||||
|
||||
SEEDS=$(bash test/p2p/seeds.sh $N)
|
||||
PERSISTENT_PEERS=$(bash test/p2p/persistent_peers.sh $N)
|
||||
|
||||
# start the testnet on a local network
|
||||
# NOTE we re-use the same network for all tests
|
||||
bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" "$SEEDS"
|
||||
bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" "$PERSISTENT_PEERS"
|
||||
|
||||
# test basic connectivity and consensus
|
||||
# start client container and check the num peers and height for all nodes
|
||||
|
@ -17,10 +17,10 @@ import (
|
||||
|
||||
// TODO: type ?
|
||||
const (
|
||||
stepNone = 0 // Used to distinguish the initial state
|
||||
stepPropose = 1
|
||||
stepPrevote = 2
|
||||
stepPrecommit = 3
|
||||
stepNone int8 = 0 // Used to distinguish the initial state
|
||||
stepPropose int8 = 1
|
||||
stepPrevote int8 = 2
|
||||
stepPrecommit int8 = 3
|
||||
)
|
||||
|
||||
func voteToStep(vote *Vote) int8 {
|
||||
@ -199,12 +199,9 @@ func (privVal *PrivValidatorFS) Reset() {
|
||||
func (privVal *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
|
||||
privVal.mtx.Lock()
|
||||
defer privVal.mtx.Unlock()
|
||||
signature, err := privVal.signBytesHRS(vote.Height, vote.Round, voteToStep(vote),
|
||||
SignBytes(chainID, vote), checkVotesOnlyDifferByTimestamp)
|
||||
if err != nil {
|
||||
if err := privVal.signVote(chainID, vote); err != nil {
|
||||
return errors.New(cmn.Fmt("Error signing vote: %v", err))
|
||||
}
|
||||
vote.Signature = signature
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -213,12 +210,9 @@ func (privVal *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
|
||||
func (privVal *PrivValidatorFS) SignProposal(chainID string, proposal *Proposal) error {
|
||||
privVal.mtx.Lock()
|
||||
defer privVal.mtx.Unlock()
|
||||
signature, err := privVal.signBytesHRS(proposal.Height, proposal.Round, stepPropose,
|
||||
SignBytes(chainID, proposal), checkProposalsOnlyDifferByTimestamp)
|
||||
if err != nil {
|
||||
if err := privVal.signProposal(chainID, proposal); err != nil {
|
||||
return fmt.Errorf("Error signing proposal: %v", err)
|
||||
}
|
||||
proposal.Signature = signature
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -250,36 +244,82 @@ func (privVal *PrivValidatorFS) checkHRS(height int64, round int, step int8) (bo
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// signBytesHRS signs the given signBytes if the height/round/step (HRS) are
|
||||
// greater than the latest state. If the HRS are equal and the only thing changed is the timestamp,
|
||||
// it returns the privValidator.LastSignature. Else it returns an error.
|
||||
func (privVal *PrivValidatorFS) signBytesHRS(height int64, round int, step int8,
|
||||
signBytes []byte, checkFn checkOnlyDifferByTimestamp) (crypto.Signature, error) {
|
||||
sig := crypto.Signature{}
|
||||
// signVote checks if the vote is good to sign and sets the vote signature.
|
||||
// It may need to set the timestamp as well if the vote is otherwise the same as
|
||||
// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
|
||||
func (privVal *PrivValidatorFS) signVote(chainID string, vote *Vote) error {
|
||||
height, round, step := vote.Height, vote.Round, voteToStep(vote)
|
||||
signBytes := SignBytes(chainID, vote)
|
||||
|
||||
sameHRS, err := privVal.checkHRS(height, round, step)
|
||||
if err != nil {
|
||||
return sig, err
|
||||
return err
|
||||
}
|
||||
|
||||
// We might crash before writing to the wal,
|
||||
// causing us to try to re-sign for the same HRS
|
||||
// causing us to try to re-sign for the same HRS.
|
||||
// If signbytes are the same, use the last signature.
|
||||
// If they only differ by timestamp, use last timestamp and signature
|
||||
// Otherwise, return error
|
||||
if sameHRS {
|
||||
// if they're the same or only differ by timestamp,
|
||||
// return the LastSignature. Otherwise, error
|
||||
if bytes.Equal(signBytes, privVal.LastSignBytes) ||
|
||||
checkFn(privVal.LastSignBytes, signBytes) {
|
||||
return privVal.LastSignature, nil
|
||||
if bytes.Equal(signBytes, privVal.LastSignBytes) {
|
||||
vote.Signature = privVal.LastSignature
|
||||
} else if timestamp, ok := checkVotesOnlyDifferByTimestamp(privVal.LastSignBytes, signBytes); ok {
|
||||
vote.Timestamp = timestamp
|
||||
vote.Signature = privVal.LastSignature
|
||||
} else {
|
||||
err = fmt.Errorf("Conflicting data")
|
||||
}
|
||||
return sig, fmt.Errorf("Conflicting data")
|
||||
return err
|
||||
}
|
||||
|
||||
sig, err = privVal.Sign(signBytes)
|
||||
// It passed the checks. Sign the vote
|
||||
sig, err := privVal.Sign(signBytes)
|
||||
if err != nil {
|
||||
return sig, err
|
||||
return err
|
||||
}
|
||||
privVal.saveSigned(height, round, step, signBytes, sig)
|
||||
return sig, nil
|
||||
vote.Signature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
// signProposal checks if the proposal is good to sign and sets the proposal signature.
|
||||
// It may need to set the timestamp as well if the proposal is otherwise the same as
|
||||
// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL).
|
||||
func (privVal *PrivValidatorFS) signProposal(chainID string, proposal *Proposal) error {
|
||||
height, round, step := proposal.Height, proposal.Round, stepPropose
|
||||
signBytes := SignBytes(chainID, proposal)
|
||||
|
||||
sameHRS, err := privVal.checkHRS(height, round, step)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We might crash before writing to the wal,
|
||||
// causing us to try to re-sign for the same HRS.
|
||||
// If signbytes are the same, use the last signature.
|
||||
// If they only differ by timestamp, use last timestamp and signature
|
||||
// Otherwise, return error
|
||||
if sameHRS {
|
||||
if bytes.Equal(signBytes, privVal.LastSignBytes) {
|
||||
proposal.Signature = privVal.LastSignature
|
||||
} else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(privVal.LastSignBytes, signBytes); ok {
|
||||
proposal.Timestamp = timestamp
|
||||
proposal.Signature = privVal.LastSignature
|
||||
} else {
|
||||
err = fmt.Errorf("Conflicting data")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// It passed the checks. Sign the proposal
|
||||
sig, err := privVal.Sign(signBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
privVal.saveSigned(height, round, step, signBytes, sig)
|
||||
proposal.Signature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
// Persist height/round/step and signature
|
||||
@ -329,10 +369,9 @@ func (pvs PrivValidatorsByAddress) Swap(i, j int) {
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
type checkOnlyDifferByTimestamp func([]byte, []byte) bool
|
||||
|
||||
// returns true if the only difference in the votes is their timestamp
|
||||
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
|
||||
// returns the timestamp from the lastSignBytes.
|
||||
// returns true if the only difference in the votes is their timestamp.
|
||||
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
|
||||
var lastVote, newVote CanonicalJSONOnceVote
|
||||
if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
|
||||
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
|
||||
@ -341,6 +380,11 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
|
||||
panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
|
||||
}
|
||||
|
||||
lastTime, err := time.Parse(timeFormat, lastVote.Vote.Timestamp)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// set the times to the same value and check equality
|
||||
now := CanonicalTime(time.Now())
|
||||
lastVote.Vote.Timestamp = now
|
||||
@ -348,11 +392,12 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
|
||||
lastVoteBytes, _ := json.Marshal(lastVote)
|
||||
newVoteBytes, _ := json.Marshal(newVote)
|
||||
|
||||
return bytes.Equal(newVoteBytes, lastVoteBytes)
|
||||
return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
|
||||
}
|
||||
|
||||
// returns the timestamp from the lastSignBytes.
|
||||
// returns true if the only difference in the proposals is their timestamp
|
||||
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
|
||||
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
|
||||
var lastProposal, newProposal CanonicalJSONOnceProposal
|
||||
if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
|
||||
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
|
||||
@ -361,6 +406,11 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) boo
|
||||
panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
|
||||
}
|
||||
|
||||
lastTime, err := time.Parse(timeFormat, lastProposal.Proposal.Timestamp)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// set the times to the same value and check equality
|
||||
now := CanonicalTime(time.Now())
|
||||
lastProposal.Proposal.Timestamp = now
|
||||
@ -368,5 +418,5 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) boo
|
||||
lastProposalBytes, _ := json.Marshal(lastProposal)
|
||||
newProposalBytes, _ := json.Marshal(newProposal)
|
||||
|
||||
return bytes.Equal(newProposalBytes, lastProposalBytes)
|
||||
return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
|
||||
}
|
||||
|
@ -173,6 +173,58 @@ func TestSignProposal(t *testing.T) {
|
||||
assert.Equal(sig, proposal.Signature)
|
||||
}
|
||||
|
||||
func TestDifferByTimestamp(t *testing.T) {
|
||||
_, tempFilePath := cmn.Tempfile("priv_validator_")
|
||||
privVal := GenPrivValidatorFS(tempFilePath)
|
||||
|
||||
block1 := PartSetHeader{5, []byte{1, 2, 3}}
|
||||
height, round := int64(10), 1
|
||||
chainID := "mychainid"
|
||||
|
||||
// test proposal
|
||||
{
|
||||
proposal := newProposal(height, round, block1)
|
||||
err := privVal.SignProposal(chainID, proposal)
|
||||
assert.NoError(t, err, "expected no error signing proposal")
|
||||
signBytes := SignBytes(chainID, proposal)
|
||||
sig := proposal.Signature
|
||||
timeStamp := clipToMS(proposal.Timestamp)
|
||||
|
||||
// manipulate the timestamp. should get changed back
|
||||
proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond)
|
||||
proposal.Signature = crypto.Signature{}
|
||||
err = privVal.SignProposal("mychainid", proposal)
|
||||
assert.NoError(t, err, "expected no error on signing same proposal")
|
||||
|
||||
assert.Equal(t, timeStamp, proposal.Timestamp)
|
||||
assert.Equal(t, signBytes, SignBytes(chainID, proposal))
|
||||
assert.Equal(t, sig, proposal.Signature)
|
||||
}
|
||||
|
||||
// test vote
|
||||
{
|
||||
voteType := VoteTypePrevote
|
||||
blockID := BlockID{[]byte{1, 2, 3}, PartSetHeader{}}
|
||||
vote := newVote(privVal.Address, 0, height, round, voteType, blockID)
|
||||
err := privVal.SignVote("mychainid", vote)
|
||||
assert.NoError(t, err, "expected no error signing vote")
|
||||
|
||||
signBytes := SignBytes(chainID, vote)
|
||||
sig := vote.Signature
|
||||
timeStamp := clipToMS(vote.Timestamp)
|
||||
|
||||
// manipulate the timestamp. should get changed back
|
||||
vote.Timestamp = vote.Timestamp.Add(time.Millisecond)
|
||||
vote.Signature = crypto.Signature{}
|
||||
err = privVal.SignVote("mychainid", vote)
|
||||
assert.NoError(t, err, "expected no error on signing same vote")
|
||||
|
||||
assert.Equal(t, timeStamp, vote.Timestamp)
|
||||
assert.Equal(t, signBytes, SignBytes(chainID, vote))
|
||||
assert.Equal(t, sig, vote.Signature)
|
||||
}
|
||||
}
|
||||
|
||||
func newVote(addr data.Bytes, idx int, height int64, round int, typ byte, blockID BlockID) *Vote {
|
||||
return &Vote{
|
||||
ValidatorAddress: addr,
|
||||
@ -190,5 +242,13 @@ func newProposal(height int64, round int, partsHeader PartSetHeader) *Proposal {
|
||||
Height: height,
|
||||
Round: round,
|
||||
BlockPartsHeader: partsHeader,
|
||||
Timestamp: time.Now().UTC(),
|
||||
}
|
||||
}
|
||||
|
||||
func clipToMS(t time.Time) time.Time {
|
||||
nano := t.UnixNano()
|
||||
million := int64(1000000)
|
||||
nano = (nano / million) * million
|
||||
return time.Unix(0, nano).UTC()
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package types
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@ -48,12 +49,12 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet {
|
||||
}
|
||||
|
||||
// incrementAccum and update the proposer
|
||||
// TODO: mind the overflow when times and votingPower shares are too large.
|
||||
func (valSet *ValidatorSet) IncrementAccum(times int) {
|
||||
// Add VotingPower * times to each validator and order into heap.
|
||||
validatorsHeap := cmn.NewHeap()
|
||||
for _, val := range valSet.Validators {
|
||||
val.Accum += val.VotingPower * int64(times) // TODO: mind overflow
|
||||
// check for overflow both multiplication and sum
|
||||
val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times)))
|
||||
validatorsHeap.Push(val, accumComparable{val})
|
||||
}
|
||||
|
||||
@ -63,7 +64,9 @@ func (valSet *ValidatorSet) IncrementAccum(times int) {
|
||||
if i == times-1 {
|
||||
valSet.Proposer = mostest
|
||||
}
|
||||
mostest.Accum -= int64(valSet.TotalVotingPower())
|
||||
|
||||
// mind underflow
|
||||
mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower())
|
||||
validatorsHeap.Update(mostest, accumComparable{mostest})
|
||||
}
|
||||
}
|
||||
@ -117,7 +120,8 @@ func (valSet *ValidatorSet) Size() int {
|
||||
func (valSet *ValidatorSet) TotalVotingPower() int64 {
|
||||
if valSet.totalVotingPower == 0 {
|
||||
for _, val := range valSet.Validators {
|
||||
valSet.totalVotingPower += val.VotingPower
|
||||
// mind overflow
|
||||
valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower)
|
||||
}
|
||||
}
|
||||
return valSet.totalVotingPower
|
||||
@ -425,3 +429,77 @@ func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []*P
|
||||
sort.Sort(PrivValidatorsByAddress(privValidators))
|
||||
return valSet, privValidators
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Safe multiplication and addition/subtraction
|
||||
|
||||
func safeMul(a, b int64) (int64, bool) {
|
||||
if a == 0 || b == 0 {
|
||||
return 0, false
|
||||
}
|
||||
if a == 1 {
|
||||
return b, false
|
||||
}
|
||||
if b == 1 {
|
||||
return a, false
|
||||
}
|
||||
if a == math.MinInt64 || b == math.MinInt64 {
|
||||
return -1, true
|
||||
}
|
||||
c := a * b
|
||||
return c, c/b != a
|
||||
}
|
||||
|
||||
func safeAdd(a, b int64) (int64, bool) {
|
||||
if b > 0 && a > math.MaxInt64-b {
|
||||
return -1, true
|
||||
} else if b < 0 && a < math.MinInt64-b {
|
||||
return -1, true
|
||||
}
|
||||
return a + b, false
|
||||
}
|
||||
|
||||
func safeSub(a, b int64) (int64, bool) {
|
||||
if b > 0 && a < math.MinInt64+b {
|
||||
return -1, true
|
||||
} else if b < 0 && a > math.MaxInt64+b {
|
||||
return -1, true
|
||||
}
|
||||
return a - b, false
|
||||
}
|
||||
|
||||
func safeMulClip(a, b int64) int64 {
|
||||
c, overflow := safeMul(a, b)
|
||||
if overflow {
|
||||
if (a < 0 || b < 0) && !(a < 0 && b < 0) {
|
||||
return math.MinInt64
|
||||
} else {
|
||||
return math.MaxInt64
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func safeAddClip(a, b int64) int64 {
|
||||
c, overflow := safeAdd(a, b)
|
||||
if overflow {
|
||||
if b < 0 {
|
||||
return math.MinInt64
|
||||
} else {
|
||||
return math.MaxInt64
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func safeSubClip(a, b int64) int64 {
|
||||
c, overflow := safeSub(a, b)
|
||||
if overflow {
|
||||
if b > 0 {
|
||||
return math.MinInt64
|
||||
} else {
|
||||
return math.MaxInt64
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
@ -2,11 +2,14 @@ package types
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/go-wire"
|
||||
"github.com/stretchr/testify/assert"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
wire "github.com/tendermint/go-wire"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
@ -190,6 +193,85 @@ func TestProposerSelection3(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatorSetTotalVotingPowerOverflows(t *testing.T) {
|
||||
vset := NewValidatorSet([]*Validator{
|
||||
{Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0},
|
||||
{Address: []byte("b"), VotingPower: math.MaxInt64, Accum: 0},
|
||||
{Address: []byte("c"), VotingPower: math.MaxInt64, Accum: 0},
|
||||
})
|
||||
|
||||
assert.EqualValues(t, math.MaxInt64, vset.TotalVotingPower())
|
||||
}
|
||||
|
||||
func TestValidatorSetIncrementAccumOverflows(t *testing.T) {
|
||||
// NewValidatorSet calls IncrementAccum(1)
|
||||
vset := NewValidatorSet([]*Validator{
|
||||
// too much voting power
|
||||
0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0},
|
||||
// too big accum
|
||||
1: {Address: []byte("b"), VotingPower: 10, Accum: math.MaxInt64},
|
||||
// almost too big accum
|
||||
2: {Address: []byte("c"), VotingPower: 10, Accum: math.MaxInt64 - 5},
|
||||
})
|
||||
|
||||
assert.Equal(t, int64(0), vset.Validators[0].Accum, "0") // because we decrement val with most voting power
|
||||
assert.EqualValues(t, math.MaxInt64, vset.Validators[1].Accum, "1")
|
||||
assert.EqualValues(t, math.MaxInt64, vset.Validators[2].Accum, "2")
|
||||
}
|
||||
|
||||
func TestValidatorSetIncrementAccumUnderflows(t *testing.T) {
|
||||
// NewValidatorSet calls IncrementAccum(1)
|
||||
vset := NewValidatorSet([]*Validator{
|
||||
0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: math.MinInt64},
|
||||
1: {Address: []byte("b"), VotingPower: 1, Accum: math.MinInt64},
|
||||
})
|
||||
|
||||
vset.IncrementAccum(5)
|
||||
|
||||
assert.EqualValues(t, math.MinInt64, vset.Validators[0].Accum, "0")
|
||||
assert.EqualValues(t, math.MinInt64, vset.Validators[1].Accum, "1")
|
||||
}
|
||||
|
||||
func TestSafeMul(t *testing.T) {
|
||||
f := func(a, b int64) bool {
|
||||
c, overflow := safeMul(a, b)
|
||||
return overflow || (!overflow && c == a*b)
|
||||
}
|
||||
if err := quick.Check(f, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSafeAdd(t *testing.T) {
|
||||
f := func(a, b int64) bool {
|
||||
c, overflow := safeAdd(a, b)
|
||||
return overflow || (!overflow && c == a+b)
|
||||
}
|
||||
if err := quick.Check(f, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSafeMulClip(t *testing.T) {
|
||||
assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MinInt64, math.MinInt64))
|
||||
assert.EqualValues(t, math.MinInt64, safeMulClip(math.MaxInt64, math.MinInt64))
|
||||
assert.EqualValues(t, math.MinInt64, safeMulClip(math.MinInt64, math.MaxInt64))
|
||||
assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MaxInt64, 2))
|
||||
}
|
||||
|
||||
func TestSafeAddClip(t *testing.T) {
|
||||
assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
|
||||
assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
|
||||
assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10))
|
||||
}
|
||||
|
||||
func TestSafeSubClip(t *testing.T) {
|
||||
assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10))
|
||||
assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64))
|
||||
assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64))
|
||||
assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10))
|
||||
}
|
||||
|
||||
func BenchmarkValidatorSetCopy(b *testing.B) {
|
||||
b.StopTimer()
|
||||
vset := NewValidatorSet([]*Validator{})
|
||||
|