package config

import (
    "fmt"
    "net/http"
    "os"
    "path/filepath"
    "time"

    "github.com/pkg/errors"
)

const (
    // FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep
    FuzzModeDrop = iota
    // FuzzModeDelay is a mode in which we randomly sleep
    FuzzModeDelay

    // LogFormatPlain is a format for colored text
    LogFormatPlain = "plain"
    // LogFormatJSON is a format for json output
    LogFormatJSON = "json"
)

// NOTE: Most of the structs & relevant comments + the
// default configuration options were used to manually
// generate the config.toml. Please reflect any changes
// made here in the defaultConfigTemplate constant in
// config/toml.go
// NOTE: libs/cli must know to look in the config dir!
var (
    DefaultTendermintDir = ".tendermint"
    defaultConfigDir     = "config"
    defaultDataDir       = "data"

    defaultConfigFileName  = "config.toml"
    defaultGenesisJSONName = "genesis.json"

    defaultPrivValKeyName   = "priv_validator_key.json"
    defaultPrivValStateName = "priv_validator_state.json"

    defaultNodeKeyName  = "node_key.json"
    defaultAddrBookName = "addrbook.json"

    defaultConfigFilePath   = filepath.Join(defaultConfigDir, defaultConfigFileName)
    defaultGenesisJSONPath  = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
    defaultPrivValKeyPath   = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
    defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)

    defaultNodeKeyPath  = filepath.Join(defaultConfigDir, defaultNodeKeyName)
    defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
)

var (
    oldPrivVal     = "priv_validator.json"
    oldPrivValPath = filepath.Join(defaultConfigDir, oldPrivVal)
)

// Config defines the top level configuration for a Tendermint node
type Config struct {
    // Top level options use an anonymous struct
    BaseConfig `mapstructure:",squash"`

    // Options for services
    RPC             *RPCConfig             `mapstructure:"rpc"`
    P2P             *P2PConfig             `mapstructure:"p2p"`
    Mempool         *MempoolConfig         `mapstructure:"mempool"`
    FastSync        *FastSyncConfig        `mapstructure:"fastsync"`
    Consensus       *ConsensusConfig       `mapstructure:"consensus"`
    TxIndex         *TxIndexConfig         `mapstructure:"tx_index"`
    Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
}

// DefaultConfig returns a default configuration for a Tendermint node
func DefaultConfig() *Config {
    return &Config{
        BaseConfig:      DefaultBaseConfig(),
        RPC:             DefaultRPCConfig(),
        P2P:             DefaultP2PConfig(),
        Mempool:         DefaultMempoolConfig(),
        FastSync:        DefaultFastSyncConfig(),
        Consensus:       DefaultConsensusConfig(),
        TxIndex:         DefaultTxIndexConfig(),
        Instrumentation: DefaultInstrumentationConfig(),
    }
}

// TestConfig returns a configuration that can be used for testing
func TestConfig() *Config {
    return &Config{
        BaseConfig:      TestBaseConfig(),
        RPC:             TestRPCConfig(),
        P2P:             TestP2PConfig(),
        Mempool:         TestMempoolConfig(),
        FastSync:        TestFastSyncConfig(),
        Consensus:       TestConsensusConfig(),
        TxIndex:         TestTxIndexConfig(),
        Instrumentation: TestInstrumentationConfig(),
    }
}

// SetRoot sets the RootDir for all Config structs
func (cfg *Config) SetRoot(root string) *Config {
    cfg.BaseConfig.RootDir = root
    cfg.RPC.RootDir = root
    cfg.P2P.RootDir = root
    cfg.Mempool.RootDir = root
    cfg.Consensus.RootDir = root
    return cfg
}
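
// exampleRootedConfig is a hedged usage sketch, not part of the upstream file:
// it shows how a caller might build a default config and root every path under
// a hypothetical home directory. The directory name is purely illustrative.
func exampleRootedConfig() *Config {
    cfg := DefaultConfig()
    cfg.SetRoot("/home/alice/.tendermint") // GenesisFile(), DBDir(), etc. now resolve under this root
    return cfg
}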

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *Config) ValidateBasic() error {
    if err := cfg.BaseConfig.ValidateBasic(); err != nil {
        return err
    }
    if err := cfg.RPC.ValidateBasic(); err != nil {
        return errors.Wrap(err, "Error in [rpc] section")
    }
    if err := cfg.P2P.ValidateBasic(); err != nil {
        return errors.Wrap(err, "Error in [p2p] section")
    }
    if err := cfg.Mempool.ValidateBasic(); err != nil {
        return errors.Wrap(err, "Error in [mempool] section")
    }
    if err := cfg.FastSync.ValidateBasic(); err != nil {
        return errors.Wrap(err, "Error in [fastsync] section")
    }
    if err := cfg.Consensus.ValidateBasic(); err != nil {
        return errors.Wrap(err, "Error in [consensus] section")
    }
    return errors.Wrap(
        cfg.Instrumentation.ValidateBasic(),
        "Error in [instrumentation] section",
    )
}
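
// exampleValidate is a minimal sketch (an assumption about typical call sites,
// not upstream code): it shows how the wrapped section errors returned above
// surface to a caller that validates a freshly built config.
func exampleValidate(cfg *Config) error {
    if err := cfg.ValidateBasic(); err != nil {
        // e.g. "load config: Error in [rpc] section: grpc_max_open_connections can't be negative"
        return errors.Wrap(err, "load config")
    }
    return nil
}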

//-----------------------------------------------------------------------------
// BaseConfig

// BaseConfig defines the base configuration for a Tendermint node
type BaseConfig struct {
    // chainID is unexposed and immutable but here for convenience
    chainID string

    // The root directory for all data.
    // This should be set in viper so it can unmarshal into this struct
    RootDir string `mapstructure:"home"`

    // TCP or UNIX socket address of the ABCI application,
    // or the name of an ABCI application compiled in with the Tendermint binary
    ProxyApp string `mapstructure:"proxy_app"`

    // A custom human readable name for this node
    Moniker string `mapstructure:"moniker"`

    // If this node is many blocks behind the tip of the chain, FastSync
    // allows it to catch up quickly by downloading blocks in parallel
    // and verifying their commits
    FastSyncMode bool `mapstructure:"fast_sync"`

    // Database backend: goleveldb | cleveldb | boltdb
    // * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
    //   - pure go
    //   - stable
    // * cleveldb (uses levigo wrapper)
    //   - fast
    //   - requires gcc
    //   - use cleveldb build tag (go build -tags cleveldb)
    // * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
    //   - EXPERIMENTAL
    //   - may be faster in some use-cases (random reads - indexer)
    //   - use boltdb build tag (go build -tags boltdb)
    DBBackend string `mapstructure:"db_backend"`

    // Database directory
    DBPath string `mapstructure:"db_dir"`

    // Output level for logging
    LogLevel string `mapstructure:"log_level"`

    // Output format: 'plain' (colored text) or 'json'
    LogFormat string `mapstructure:"log_format"`

    // Path to the JSON file containing the initial validator set and other meta data
    Genesis string `mapstructure:"genesis_file"`

    // Path to the JSON file containing the private key to use as a validator in the consensus protocol
    PrivValidatorKey string `mapstructure:"priv_validator_key_file"`

    // Path to the JSON file containing the last sign state of a validator
    PrivValidatorState string `mapstructure:"priv_validator_state_file"`

    // TCP or UNIX socket address for Tendermint to listen on for
    // connections from an external PrivValidator process
    PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`

    // A JSON file containing the private key to use for p2p authenticated encryption
    NodeKey string `mapstructure:"node_key_file"`

    // Mechanism to connect to the ABCI application: socket | grpc
    ABCI string `mapstructure:"abci"`

    // TCP or UNIX socket address for the profiling server to listen on
    ProfListenAddress string `mapstructure:"prof_laddr"`

    // If true, query the ABCI app on connecting to a new peer
    // so the app can decide if we should keep the connection or not
    FilterPeers bool `mapstructure:"filter_peers"` // false
}

// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
    return BaseConfig{
        Genesis:            defaultGenesisJSONPath,
        PrivValidatorKey:   defaultPrivValKeyPath,
        PrivValidatorState: defaultPrivValStatePath,
        NodeKey:            defaultNodeKeyPath,
        Moniker:            defaultMoniker,
        ProxyApp:           "tcp://127.0.0.1:26658",
        ABCI:               "socket",
        LogLevel:           DefaultPackageLogLevels(),
        LogFormat:          LogFormatPlain,
        ProfListenAddress:  "",
        FastSyncMode:       true,
        FilterPeers:        false,
        DBBackend:          "goleveldb",
        DBPath:             "data",
    }
}

// TestBaseConfig returns a base configuration for testing a Tendermint node
func TestBaseConfig() BaseConfig {
    cfg := DefaultBaseConfig()
    cfg.chainID = "tendermint_test"
    cfg.ProxyApp = "kvstore"
    cfg.FastSyncMode = false
    cfg.DBBackend = "memdb"
    return cfg
}

func (cfg BaseConfig) ChainID() string {
    return cfg.chainID
}

// GenesisFile returns the full path to the genesis.json file
func (cfg BaseConfig) GenesisFile() string {
    return rootify(cfg.Genesis, cfg.RootDir)
}

// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
func (cfg BaseConfig) PrivValidatorKeyFile() string {
    return rootify(cfg.PrivValidatorKey, cfg.RootDir)
}

// PrivValidatorStateFile returns the full path to the priv_validator_state.json file
func (cfg BaseConfig) PrivValidatorStateFile() string {
    return rootify(cfg.PrivValidatorState, cfg.RootDir)
}

// OldPrivValidatorFile returns the full path of the priv_validator.json from pre v0.28.0.
// TODO: eventually remove.
func (cfg BaseConfig) OldPrivValidatorFile() string {
    return rootify(oldPrivValPath, cfg.RootDir)
}

// NodeKeyFile returns the full path to the node_key.json file
func (cfg BaseConfig) NodeKeyFile() string {
    return rootify(cfg.NodeKey, cfg.RootDir)
}

// DBDir returns the full path to the database directory
func (cfg BaseConfig) DBDir() string {
    return rootify(cfg.DBPath, cfg.RootDir)
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
    switch cfg.LogFormat {
    case LogFormatPlain, LogFormatJSON:
    default:
        return errors.New("unknown log_format (must be 'plain' or 'json')")
    }
    return nil
}

// DefaultLogLevel returns a default log level of "error"
func DefaultLogLevel() string {
    return "error"
}

// DefaultPackageLogLevels returns a default log level setting so all packages
// log at "error", while the `state` and `main` packages log at "info"
func DefaultPackageLogLevels() string {
    return fmt.Sprintf("main:info,state:info,*:%s", DefaultLogLevel())
}
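
// exampleLogLevel is an illustrative sketch (the override string is an
// assumption, not taken from this file): DefaultPackageLogLevels above yields
// "main:info,state:info,*:error", and a caller may swap in its own
// comma-separated "module:level" list.
func exampleLogLevel() BaseConfig {
    cfg := DefaultBaseConfig()
    cfg.LogLevel = "consensus:debug,*:error" // hypothetical override
    return cfg
}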

//-----------------------------------------------------------------------------
// RPCConfig

// RPCConfig defines the configuration options for the Tendermint RPC server
type RPCConfig struct {
    RootDir string `mapstructure:"home"`

    // TCP or UNIX socket address for the RPC server to listen on
    ListenAddress string `mapstructure:"laddr"`

    // A list of origins a cross-domain request can be executed from.
    // If the special '*' value is present in the list, all origins will be allowed.
    // An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com).
    // Only one wildcard can be used per origin.
    CORSAllowedOrigins []string `mapstructure:"cors_allowed_origins"`

    // A list of methods the client is allowed to use with cross-domain requests.
    CORSAllowedMethods []string `mapstructure:"cors_allowed_methods"`

    // A list of non-simple headers the client is allowed to use with cross-domain requests.
    CORSAllowedHeaders []string `mapstructure:"cors_allowed_headers"`

    // TCP or UNIX socket address for the gRPC server to listen on
    // NOTE: This server only supports /broadcast_tx_commit
    GRPCListenAddress string `mapstructure:"grpc_laddr"`

    // Maximum number of simultaneous connections.
    // Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
    // If you want to accept a larger number than the default, make sure
    // you increase your OS limits.
    // 0 - unlimited.
    GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"`

    // Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool
    Unsafe bool `mapstructure:"unsafe"`

    // Maximum number of simultaneous connections (including WebSocket).
    // Does not include gRPC connections. See grpc_max_open_connections
    // If you want to accept a larger number than the default, make sure
    // you increase your OS limits.
    // 0 - unlimited.
    // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
    // 1024 - 40 - 10 - 50 = 924 = ~900
    MaxOpenConnections int `mapstructure:"max_open_connections"`

    // Maximum number of unique clientIDs that can /subscribe
    // If you're using /broadcast_tx_commit, set to the estimated maximum number
    // of broadcast_tx_commit calls per block.
    MaxSubscriptionClients int `mapstructure:"max_subscription_clients"`

    // Maximum number of unique queries a given client can /subscribe to
    // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set
    // to the estimated maximum number of broadcast_tx_commit calls per block.
    MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"`

    // How long to wait for a tx to be committed during /broadcast_tx_commit
    // WARNING: Using a value larger than 10s will result in increasing the
    // global HTTP write timeout, which applies to all connections and endpoints.
    // See https://github.com/tendermint/tendermint/issues/3435
    TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`

    // Maximum size of request body, in bytes
    MaxBodyBytes int64 `mapstructure:"max_body_bytes"`

    // Maximum size of request header, in bytes
    MaxHeaderBytes int `mapstructure:"max_header_bytes"`

    // The path to a file containing certificate that is used to create the HTTPS server.
    // Might be either an absolute path or a path relative to tendermint's config directory.
    //
    // If the certificate is signed by a certificate authority,
    // the certFile should be the concatenation of the server's certificate, any intermediates,
    // and the CA's certificate.
    //
    // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
    TLSCertFile string `mapstructure:"tls_cert_file"`

    // The path to a file containing matching private key that is used to create the HTTPS server.
    // Might be either an absolute path or a path relative to tendermint's config directory.
    //
    // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
    TLSKeyFile string `mapstructure:"tls_key_file"`
}

// DefaultRPCConfig returns a default configuration for the RPC server
func DefaultRPCConfig() *RPCConfig {
    return &RPCConfig{
        ListenAddress:          "tcp://127.0.0.1:26657",
        CORSAllowedOrigins:     []string{},
        CORSAllowedMethods:     []string{http.MethodHead, http.MethodGet, http.MethodPost},
        CORSAllowedHeaders:     []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
        GRPCListenAddress:      "",
        GRPCMaxOpenConnections: 900,

        Unsafe:             false,
        MaxOpenConnections: 900,

        MaxSubscriptionClients:    100,
        MaxSubscriptionsPerClient: 5,
        TimeoutBroadcastTxCommit:  10 * time.Second,

        MaxBodyBytes:   int64(1000000), // 1MB
        MaxHeaderBytes: 1 << 20,        // same as the net/http default

        TLSCertFile: "",
        TLSKeyFile:  "",
    }
}

// TestRPCConfig returns a configuration for testing the RPC server
func TestRPCConfig() *RPCConfig {
    cfg := DefaultRPCConfig()
    cfg.ListenAddress = "tcp://0.0.0.0:36657"
    cfg.GRPCListenAddress = "tcp://0.0.0.0:36658"
    cfg.Unsafe = true
    return cfg
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *RPCConfig) ValidateBasic() error {
    if cfg.GRPCMaxOpenConnections < 0 {
        return errors.New("grpc_max_open_connections can't be negative")
    }
    if cfg.MaxOpenConnections < 0 {
        return errors.New("max_open_connections can't be negative")
    }
    if cfg.MaxSubscriptionClients < 0 {
        return errors.New("max_subscription_clients can't be negative")
    }
    if cfg.MaxSubscriptionsPerClient < 0 {
        return errors.New("max_subscriptions_per_client can't be negative")
    }
    if cfg.TimeoutBroadcastTxCommit < 0 {
        return errors.New("timeout_broadcast_tx_commit can't be negative")
    }
    if cfg.MaxBodyBytes < 0 {
        return errors.New("max_body_bytes can't be negative")
    }
    if cfg.MaxHeaderBytes < 0 {
        return errors.New("max_header_bytes can't be negative")
    }
    return nil
}

// IsCorsEnabled returns true if cross-origin resource sharing is enabled.
func (cfg *RPCConfig) IsCorsEnabled() bool {
    return len(cfg.CORSAllowedOrigins) != 0
}

// KeyFile returns the full path to the TLS private key file, resolving a
// relative path against the config directory.
func (cfg RPCConfig) KeyFile() string {
    path := cfg.TLSKeyFile
    if filepath.IsAbs(path) {
        return path
    }
    return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}

// CertFile returns the full path to the TLS certificate file, resolving a
// relative path against the config directory.
func (cfg RPCConfig) CertFile() string {
    path := cfg.TLSCertFile
    if filepath.IsAbs(path) {
        return path
    }
    return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}

// IsTLSEnabled returns true if both a TLS certificate and key file are configured.
func (cfg RPCConfig) IsTLSEnabled() bool {
    return cfg.TLSCertFile != "" && cfg.TLSKeyFile != ""
}
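
// exampleTLSConfig is a hedged sketch (the file names are made up): per the
// NOTEs on the struct fields, setting both tls_cert_file and tls_key_file is
// what switches the RPC server from HTTP to HTTPS.
func exampleTLSConfig() *RPCConfig {
    cfg := DefaultRPCConfig()
    cfg.TLSCertFile = "server.crt" // relative paths resolve under the config dir, see CertFile()
    cfg.TLSKeyFile = "server.key"
    // cfg.IsTLSEnabled() is true only when both fields are non-empty.
    return cfg
}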

//-----------------------------------------------------------------------------
// P2PConfig

// P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer
type P2PConfig struct {
    RootDir string `mapstructure:"home"`

    // Address to listen for incoming connections
    ListenAddress string `mapstructure:"laddr"`

    // Address to advertise to peers for them to dial
    ExternalAddress string `mapstructure:"external_address"`

    // Comma separated list of seed nodes to connect to
    // We only use these if we can’t connect to peers in the addrbook
    Seeds string `mapstructure:"seeds"`

    // Comma separated list of nodes to keep persistent connections to
    PersistentPeers string `mapstructure:"persistent_peers"`

    // UPNP port forwarding
    UPNP bool `mapstructure:"upnp"`

    // Path to address book
    AddrBook string `mapstructure:"addr_book_file"`

    // Set true for strict address routability rules
    // Set false for private or local networks
    AddrBookStrict bool `mapstructure:"addr_book_strict"`

    // Maximum number of inbound peers
    MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"`

    // Maximum number of outbound peers to connect to, excluding persistent peers
    MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`

    // Time to wait before flushing messages out on the connection
    FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"`

    // Maximum size of a message packet payload, in bytes
    MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`

    // Rate at which packets can be sent, in bytes/second
    SendRate int64 `mapstructure:"send_rate"`

    // Rate at which packets can be received, in bytes/second
    RecvRate int64 `mapstructure:"recv_rate"`

    // Set true to enable the peer-exchange reactor
    PexReactor bool `mapstructure:"pex"`

    // Seed mode, in which the node constantly crawls the network and looks for
    // peers. If another node asks it for addresses, it responds and disconnects.
    //
    // Does not work if the peer-exchange reactor is disabled.
    SeedMode bool `mapstructure:"seed_mode"`

    // Comma separated list of peer IDs to keep private (will not be gossiped to
    // other peers)
    PrivatePeerIDs string `mapstructure:"private_peer_ids"`

    // Toggle to disable guard against peers connecting from the same IP.
    AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`

    // Peer connection configuration.
    HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
    DialTimeout      time.Duration `mapstructure:"dial_timeout"`

    // Testing params.
    // Force dial to fail
    TestDialFail bool `mapstructure:"test_dial_fail"`
    // Fuzz connection
    TestFuzz       bool            `mapstructure:"test_fuzz"`
    TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"`
}

// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
func DefaultP2PConfig() *P2PConfig {
    return &P2PConfig{
        ListenAddress:           "tcp://0.0.0.0:26656",
        ExternalAddress:         "",
        UPNP:                    false,
        AddrBook:                defaultAddrBookPath,
        AddrBookStrict:          true,
        MaxNumInboundPeers:      40,
        MaxNumOutboundPeers:     10,
        FlushThrottleTimeout:    100 * time.Millisecond,
        MaxPacketMsgPayloadSize: 1024,    // 1 kB
        SendRate:                5120000, // 5 mB/s
        RecvRate:                5120000, // 5 mB/s
        PexReactor:              true,
        SeedMode:                false,
        AllowDuplicateIP:        false,
        HandshakeTimeout:        20 * time.Second,
        DialTimeout:             3 * time.Second,
        TestDialFail:            false,
        TestFuzz:                false,
        TestFuzzConfig:          DefaultFuzzConnConfig(),
    }
}
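
// exampleP2PPeers is an illustrative sketch (the node IDs and addresses are
// made up, and the "nodeID@host:port" form is an assumption not stated in this
// file): it shows the kind of comma-separated lists that Seeds and
// PersistentPeers are meant to hold.
func exampleP2PPeers() *P2PConfig {
    cfg := DefaultP2PConfig()
    cfg.Seeds = "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@seed1.example.com:26656"
    cfg.PersistentPeers = "1beaf2e31677a50b5defc66a6b0d5f5a9e9e6d14@10.0.0.2:26656,0ea6a46f6a5b2e01e4b0c174a97b33f2e889987b@10.0.0.3:26656"
    return cfg
}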

// TestP2PConfig returns a configuration for testing the peer-to-peer layer
func TestP2PConfig() *P2PConfig {
    cfg := DefaultP2PConfig()
    cfg.ListenAddress = "tcp://0.0.0.0:36656"
    cfg.FlushThrottleTimeout = 10 * time.Millisecond
    cfg.AllowDuplicateIP = true
    return cfg
}

// AddrBookFile returns the full path to the address book
func (cfg *P2PConfig) AddrBookFile() string {
    return rootify(cfg.AddrBook, cfg.RootDir)
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *P2PConfig) ValidateBasic() error {
    if cfg.MaxNumInboundPeers < 0 {
        return errors.New("max_num_inbound_peers can't be negative")
    }
    if cfg.MaxNumOutboundPeers < 0 {
        return errors.New("max_num_outbound_peers can't be negative")
    }
    if cfg.FlushThrottleTimeout < 0 {
        return errors.New("flush_throttle_timeout can't be negative")
    }
    if cfg.MaxPacketMsgPayloadSize < 0 {
        return errors.New("max_packet_msg_payload_size can't be negative")
    }
    if cfg.SendRate < 0 {
        return errors.New("send_rate can't be negative")
    }
    if cfg.RecvRate < 0 {
        return errors.New("recv_rate can't be negative")
    }
    return nil
}

// FuzzConnConfig is a FuzzedConnection configuration.
type FuzzConnConfig struct {
    Mode         int
    MaxDelay     time.Duration
    ProbDropRW   float64
    ProbDropConn float64
    ProbSleep    float64
}

// DefaultFuzzConnConfig returns the default config.
func DefaultFuzzConnConfig() *FuzzConnConfig {
    return &FuzzConnConfig{
        Mode:         FuzzModeDrop,
        MaxDelay:     3 * time.Second,
        ProbDropRW:   0.2,
        ProbDropConn: 0.00,
        ProbSleep:    0.00,
    }
}

//-----------------------------------------------------------------------------
// MempoolConfig

// MempoolConfig defines the configuration options for the Tendermint mempool
type MempoolConfig struct {
    RootDir     string `mapstructure:"home"`
    Recheck     bool   `mapstructure:"recheck"`
    Broadcast   bool   `mapstructure:"broadcast"`
    WalPath     string `mapstructure:"wal_dir"`
    Size        int    `mapstructure:"size"`
    MaxTxsBytes int64  `mapstructure:"max_txs_bytes"`
    CacheSize   int    `mapstructure:"cache_size"`
    MaxTxBytes  int    `mapstructure:"max_tx_bytes"`
}

// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
func DefaultMempoolConfig() *MempoolConfig {
    return &MempoolConfig{
        Recheck:   true,
        Broadcast: true,
        WalPath:   "",
        // Each signature verification takes .5ms, Size reduced until we implement
        // ABCI Recheck
        Size:        5000,
        MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
        CacheSize:   10000,
        MaxTxBytes:  1024 * 1024, // 1MB
    }
}
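
// exampleMempoolLimits is a hedged sketch (the numbers are illustrative, not
// recommendations): it shows the three independent caps a caller can tune:
// Size bounds the number of transactions held, MaxTxsBytes the total bytes
// held, and MaxTxBytes the size of any single transaction.
func exampleMempoolLimits() *MempoolConfig {
    cfg := DefaultMempoolConfig()
    cfg.Size = 10000                    // at most 10k txs held at once
    cfg.MaxTxsBytes = 512 * 1024 * 1024 // at most 512MB of txs held at once
    cfg.MaxTxBytes = 64 * 1024          // cap any single tx at 64kB
    return cfg
}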

// TestMempoolConfig returns a configuration for testing the Tendermint mempool
func TestMempoolConfig() *MempoolConfig {
    cfg := DefaultMempoolConfig()
    cfg.CacheSize = 1000
    return cfg
}

// WalDir returns the full path to the mempool's write-ahead log
func (cfg *MempoolConfig) WalDir() string {
    return rootify(cfg.WalPath, cfg.RootDir)
}

// WalEnabled returns true if the WAL is enabled.
func (cfg *MempoolConfig) WalEnabled() bool {
    return cfg.WalPath != ""
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *MempoolConfig) ValidateBasic() error {
    if cfg.Size < 0 {
        return errors.New("size can't be negative")
    }
    if cfg.MaxTxsBytes < 0 {
        return errors.New("max_txs_bytes can't be negative")
    }
    if cfg.CacheSize < 0 {
        return errors.New("cache_size can't be negative")
    }
    if cfg.MaxTxBytes < 0 {
        return errors.New("max_tx_bytes can't be negative")
    }
    return nil
}

//-----------------------------------------------------------------------------
// FastSyncConfig

// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
    Version string `mapstructure:"version"`
}

// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
    return &FastSyncConfig{
        Version: "v0",
    }
}

// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
    return DefaultFastSyncConfig()
}

// ValidateBasic performs basic validation.
func (cfg *FastSyncConfig) ValidateBasic() error {
    switch cfg.Version {
    case "v0":
        return nil
    case "v1":
        return nil
    default:
        return fmt.Errorf("unknown fastsync version %s", cfg.Version)
    }
}
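
// exampleFastSyncV1 is a minimal sketch (not upstream code): opting into the
// newer reactor implementation only requires setting Version to "v1", which
// ValidateBasic above accepts alongside the default "v0".
func exampleFastSyncV1() *FastSyncConfig {
    cfg := DefaultFastSyncConfig()
    cfg.Version = "v1"
    return cfg
}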

//-----------------------------------------------------------------------------
// ConsensusConfig

// ConsensusConfig defines the configuration for the Tendermint consensus service,
// including timeouts and details about the WAL and the block structure.
type ConsensusConfig struct {
    RootDir string `mapstructure:"home"`
    WalPath string `mapstructure:"wal_file"`
    walFile string // overrides WalPath if set

    TimeoutPropose        time.Duration `mapstructure:"timeout_propose"`
    TimeoutProposeDelta   time.Duration `mapstructure:"timeout_propose_delta"`
    TimeoutPrevote        time.Duration `mapstructure:"timeout_prevote"`
    TimeoutPrevoteDelta   time.Duration `mapstructure:"timeout_prevote_delta"`
    TimeoutPrecommit      time.Duration `mapstructure:"timeout_precommit"`
    TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"`
    TimeoutCommit         time.Duration `mapstructure:"timeout_commit"`

    // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
    SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`

    // EmptyBlocks mode and possible interval between empty blocks
    CreateEmptyBlocks         bool          `mapstructure:"create_empty_blocks"`
    CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"`

    // Reactor sleep duration parameters
    PeerGossipSleepDuration     time.Duration `mapstructure:"peer_gossip_sleep_duration"`
    PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"`
}

// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
	return &ConsensusConfig{
		WalPath:                     filepath.Join(defaultDataDir, "cs.wal", "wal"),
		TimeoutPropose:              3000 * time.Millisecond,
		TimeoutProposeDelta:         500 * time.Millisecond,
		TimeoutPrevote:              1000 * time.Millisecond,
		TimeoutPrevoteDelta:         500 * time.Millisecond,
		TimeoutPrecommit:            1000 * time.Millisecond,
		TimeoutPrecommitDelta:       500 * time.Millisecond,
		TimeoutCommit:               1000 * time.Millisecond,
		SkipTimeoutCommit:           false,
		CreateEmptyBlocks:           true,
		CreateEmptyBlocksInterval:   0 * time.Second,
		PeerGossipSleepDuration:     100 * time.Millisecond,
		PeerQueryMaj23SleepDuration: 2000 * time.Millisecond,
	}
}
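
// Illustrative override of the defaults (an added sketch, not part of the
// original file; the values below are arbitrary examples):
//
//	cfg := DefaultConsensusConfig()
//	cfg.TimeoutCommit = 5 * time.Second // wait longer before starting the next height
//	cfg.CreateEmptyBlocks = false       // only propose blocks once there are transactions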

// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
	cfg := DefaultConsensusConfig()
	cfg.TimeoutPropose = 40 * time.Millisecond
	cfg.TimeoutProposeDelta = 1 * time.Millisecond
	cfg.TimeoutPrevote = 10 * time.Millisecond
	cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
	cfg.TimeoutPrecommit = 10 * time.Millisecond
	cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
	cfg.TimeoutCommit = 10 * time.Millisecond
	cfg.SkipTimeoutCommit = true
	cfg.PeerGossipSleepDuration = 5 * time.Millisecond
	cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
	return cfg
}

// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool {
	return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
}
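
// The four possible combinations, worked out from the expression above (this
// summary is an editorial addition, not original documentation):
//
//	CreateEmptyBlocks=true,  CreateEmptyBlocksInterval=0   -> false (empty blocks may be proposed immediately)
//	CreateEmptyBlocks=true,  CreateEmptyBlocksInterval=30s -> true
//	CreateEmptyBlocks=false, CreateEmptyBlocksInterval=0   -> true
//	CreateEmptyBlocks=false, CreateEmptyBlocksInterval=30s -> true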

// Propose returns the amount of time to wait for a proposal
func (cfg *ConsensusConfig) Propose(round int) time.Duration {
	return time.Duration(
		cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
	) * time.Nanosecond
}
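
// Worked example (editorial addition): with the defaults above,
// TimeoutPropose = 3000ms and TimeoutProposeDelta = 500ms, so
//
//	cfg.Propose(0) // 3000ms
//	cfg.Propose(2) // 3000ms + 2*500ms = 4000ms
//
// i.e. every extra round gives the proposer a little more time.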

// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
	return time.Duration(
		cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
	) * time.Nanosecond
}

// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
	return time.Duration(
		cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
	) * time.Nanosecond
}

// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit).
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
	return t.Add(cfg.TimeoutCommit)
}
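
// Added note (not original documentation): with the default TimeoutCommit of
// 1000ms, Commit just shifts the commit time forward, e.g.
//
//	deadline := cfg.Commit(time.Now()) // one second from now with the defaults
//
// SkipTimeoutCommit (see the struct above) makes the node progress as soon as
// it has all the precommits, as if TimeoutCommit were 0.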

// WalFile returns the full path to the write-ahead log file
func (cfg *ConsensusConfig) WalFile() string {
	if cfg.walFile != "" {
		return cfg.walFile
	}
	return rootify(cfg.WalPath, cfg.RootDir)
}

// SetWalFile sets the path to the write-ahead log file
func (cfg *ConsensusConfig) SetWalFile(walFile string) {
	cfg.walFile = walFile
}
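
// Illustrative interplay of RootDir, WalPath and SetWalFile (an added sketch;
// the home directory below is a hypothetical example):
//
//	cfg := DefaultConsensusConfig()
//	cfg.RootDir = "/home/user/.tendermint"
//	cfg.WalFile() // "/home/user/.tendermint/data/cs.wal/wal"
//	cfg.SetWalFile("/tmp/override.wal")
//	cfg.WalFile() // "/tmp/override.wal" (the explicit override wins)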

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *ConsensusConfig) ValidateBasic() error {
	if cfg.TimeoutPropose < 0 {
		return errors.New("timeout_propose can't be negative")
	}
	if cfg.TimeoutProposeDelta < 0 {
		return errors.New("timeout_propose_delta can't be negative")
	}
	if cfg.TimeoutPrevote < 0 {
		return errors.New("timeout_prevote can't be negative")
	}
	if cfg.TimeoutPrevoteDelta < 0 {
		return errors.New("timeout_prevote_delta can't be negative")
	}
	if cfg.TimeoutPrecommit < 0 {
		return errors.New("timeout_precommit can't be negative")
	}
	if cfg.TimeoutPrecommitDelta < 0 {
		return errors.New("timeout_precommit_delta can't be negative")
	}
	if cfg.TimeoutCommit < 0 {
		return errors.New("timeout_commit can't be negative")
	}
	if cfg.CreateEmptyBlocksInterval < 0 {
		return errors.New("create_empty_blocks_interval can't be negative")
	}
	if cfg.PeerGossipSleepDuration < 0 {
		return errors.New("peer_gossip_sleep_duration can't be negative")
	}
	if cfg.PeerQueryMaj23SleepDuration < 0 {
		return errors.New("peer_query_maj23_sleep_duration can't be negative")
	}
	return nil
}
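
// Illustrative validation failure (an added sketch, not part of the original file):
//
//	cfg := DefaultConsensusConfig()
//	cfg.TimeoutPrevote = -1 * time.Second
//	err := cfg.ValidateBasic() // "timeout_prevote can't be negative"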

//-----------------------------------------------------------------------------
// TxIndexConfig

// TxIndexConfig defines the configuration for the transaction indexer,
// including tags to index.
type TxIndexConfig struct {
	// What indexer to use for transactions
	//
	// Options:
	//   1) "null"
	//   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
	Indexer string `mapstructure:"indexer"`

	// Comma-separated list of tags to index (by default the only tag is "tx.hash")
	//
	// You can also index transactions by height by adding the "tx.height" tag here.
	//
	// It's recommended to index only a subset of tags due to possible memory
	// bloat. This, of course, depends on the indexer's DB and the volume of
	// transactions.
	IndexTags string `mapstructure:"index_tags"`

	// When set to true, tells the indexer to index all tags (predefined tags:
	// "tx.hash", "tx.height" and all tags from DeliverTx responses).
	//
	// Note this may not be desirable (see the comment above). IndexTags takes
	// precedence over IndexAllTags (i.e. when given both, IndexTags will be
	// indexed).
	IndexAllTags bool `mapstructure:"index_all_tags"`
}

// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
	return &TxIndexConfig{
		Indexer:      "kv",
		IndexTags:    "",
		IndexAllTags: false,
	}
}
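
// Illustrative indexer configuration (an added sketch; the tag list is just an
// example, not a recommendation from the original file):
//
//	cfg := DefaultTxIndexConfig() // Indexer: "kv", only "tx.hash" is indexed
//	cfg.IndexTags = "tx.height"   // also index transactions by height
//	// or, at the cost of possible memory bloat:
//	cfg.IndexAllTags = true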

// TestTxIndexConfig returns a default configuration for the transaction indexer.
func TestTxIndexConfig() *TxIndexConfig {
	return DefaultTxIndexConfig()
}

//-----------------------------------------------------------------------------
// InstrumentationConfig

// InstrumentationConfig defines the configuration for metrics reporting.
type InstrumentationConfig struct {
	// When true, Prometheus metrics are served under /metrics on
	// PrometheusListenAddr.
	// Check out the documentation for the list of available metrics.
	Prometheus bool `mapstructure:"prometheus"`

	// Address to listen for Prometheus collector(s) connections.
	PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`

	// Maximum number of simultaneous connections.
	// If you want to accept a larger number than the default, make sure
	// you increase your OS limits.
	// 0 - unlimited.
	MaxOpenConnections int `mapstructure:"max_open_connections"`

	// Instrumentation namespace.
	Namespace string `mapstructure:"namespace"`
}

// DefaultInstrumentationConfig returns a default configuration for metrics
// reporting.
func DefaultInstrumentationConfig() *InstrumentationConfig {
	return &InstrumentationConfig{
		Prometheus:           false,
		PrometheusListenAddr: ":26660",
		MaxOpenConnections:   3,
		Namespace:            "tendermint",
	}
}
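
// Illustrative way to turn metrics on (an added sketch; the listen address is
// simply the default shown above):
//
//	cfg := DefaultInstrumentationConfig()
//	cfg.Prometheus = true
//	// Metrics are then served under /metrics on ":26660", with at most
//	// MaxOpenConnections (3) simultaneous scraper connections.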

// TestInstrumentationConfig returns a default configuration for metrics
// reporting.
func TestInstrumentationConfig() *InstrumentationConfig {
	return DefaultInstrumentationConfig()
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *InstrumentationConfig) ValidateBasic() error {
	if cfg.MaxOpenConnections < 0 {
		return errors.New("max_open_connections can't be negative")
	}
	return nil
}

//-----------------------------------------------------------------------------
// Utils

// helper function to make config creation independent of root dir
func rootify(path, root string) string {
	if filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(root, path)
}
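
// Illustrative behaviour of rootify (an added sketch; the paths are hypothetical):
//
//	rootify("data/cs.wal/wal", "/home/user/.tendermint") // "/home/user/.tendermint/data/cs.wal/wal"
//	rootify("/var/tendermint/wal", "/home/user/.tendermint") // absolute paths are returned unchanged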

//-----------------------------------------------------------------------------
// Moniker

var defaultMoniker = getDefaultMoniker()

// getDefaultMoniker returns a default moniker, which is the host name. If runtime
// fails to get the host name, "anonymous" will be returned.
func getDefaultMoniker() string {
	moniker, err := os.Hostname()
	if err != nil {
		moniker = "anonymous"
	}
	return moniker
}