mirror of
https://github.com/fluencelabs/tendermint
synced 2025-07-31 20:21:56 +00:00
Compare commits
5 Commits
anton/chan
...
marko/remo
Author | SHA1 | Date | |
---|---|---|---|
|
cc628c358e | ||
|
b50aa9a9ca | ||
|
60368ac164 | ||
|
d70135ec71 | ||
|
e179787d40 |
@@ -19,8 +19,9 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [privval] \#3370 Refactors and simplifies validator/kms connection handling. Please refer to thttps://github.com/tendermint/tendermint/pull/3370#issue-257360971
|
||||
- [consensus] \#3839 Reduce "Error attempting to add vote" message severity (Error -> Info)
|
||||
- [privval] \#3370 Refactors and simplifies validator/kms connection handling. Please refer to [this comment](https://github.com/tendermint/tendermint/pull/3370#issue-257360971) for details.
|
||||
- [mempool] \#3877 Make `max_tx_bytes` configurable instead of `max_msg_bytes`
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
|
@@ -3,7 +3,6 @@ package types
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
@@ -13,10 +12,9 @@ import (
|
||||
)
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
b, err := json.Marshal(&ResponseDeliverTx{})
|
||||
_, err := json.Marshal(&ResponseDeliverTx{})
|
||||
assert.Nil(t, err)
|
||||
// Do not include empty fields.
|
||||
assert.False(t, strings.Contains(string(b), "code"))
|
||||
// include empty fields.
|
||||
|
||||
r1 := ResponseCheckTx{
|
||||
Code: 1,
|
||||
@@ -31,7 +29,7 @@ func TestMarshalJSON(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err = json.Marshal(&r1)
|
||||
b, err := json.Marshal(&r1)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var r2 ResponseCheckTx
|
||||
|
@@ -42,14 +42,14 @@ func (r ResponseQuery) IsErr() bool {
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// override JSON marshalling so we dont emit defaults (ie. disable omitempty)
|
||||
// override JSON marshalling so we emit defaults (ie. disable omitempty)
|
||||
// note we need Unmarshal functions too because protobuf had the bright idea
|
||||
// to marshal int64->string. cool. cool, cool, cool: https://developers.google.com/protocol-buffers/docs/proto3#json
|
||||
|
||||
var (
|
||||
jsonpbMarshaller = jsonpb.Marshaler{
|
||||
EnumsAsInts: true,
|
||||
EmitDefaults: false,
|
||||
EmitDefaults: true,
|
||||
}
|
||||
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
|
||||
)
|
||||
|
@@ -34,11 +34,11 @@ var _ = math.Inf
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ResultStatus struct {
|
||||
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo" json:"nodeInfo,omitempty"`
|
||||
PubKey *PubKey `protobuf:"bytes,2,req,name=pubKey" json:"pubKey,omitempty"`
|
||||
LatestBlockHash []byte `protobuf:"bytes,3,req,name=latestBlockHash" json:"latestBlockHash,omitempty"`
|
||||
LatestBlockHeight *int64 `protobuf:"varint,4,req,name=latestBlockHeight" json:"latestBlockHeight,omitempty"`
|
||||
LatestBlocktime *int64 `protobuf:"varint,5,req,name=latestBlocktime" json:"latestBlocktime,omitempty"`
|
||||
NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo" json:"nodeInfo"`
|
||||
PubKey *PubKey `protobuf:"bytes,2,req,name=pubKey" json:"pubKey"`
|
||||
LatestBlockHash []byte `protobuf:"bytes,3,req,name=latestBlockHash" json:"latestBlockHash"`
|
||||
LatestBlockHeight *int64 `protobuf:"varint,4,req,name=latestBlockHeight" json:"latestBlockHeight"`
|
||||
LatestBlocktime *int64 `protobuf:"varint,5,req,name=latestBlocktime" json:"latestBlocktime"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
@@ -83,13 +83,13 @@ func (m *ResultStatus) GetLatestBlocktime() int64 {
|
||||
}
|
||||
|
||||
type NodeInfo struct {
|
||||
Id *ID `protobuf:"bytes,1,req,name=id" json:"id,omitempty"`
|
||||
Moniker *string `protobuf:"bytes,2,req,name=moniker" json:"moniker,omitempty"`
|
||||
Network *string `protobuf:"bytes,3,req,name=network" json:"network,omitempty"`
|
||||
RemoteAddr *string `protobuf:"bytes,4,req,name=remoteAddr" json:"remoteAddr,omitempty"`
|
||||
ListenAddr *string `protobuf:"bytes,5,req,name=listenAddr" json:"listenAddr,omitempty"`
|
||||
Version *string `protobuf:"bytes,6,req,name=version" json:"version,omitempty"`
|
||||
Other []string `protobuf:"bytes,7,rep,name=other" json:"other,omitempty"`
|
||||
Id *ID `protobuf:"bytes,1,req,name=id" json:"id"`
|
||||
Moniker *string `protobuf:"bytes,2,req,name=moniker" json:"moniker"`
|
||||
Network *string `protobuf:"bytes,3,req,name=network" json:"network"`
|
||||
RemoteAddr *string `protobuf:"bytes,4,req,name=remoteAddr" json:"remoteAddr"`
|
||||
ListenAddr *string `protobuf:"bytes,5,req,name=listenAddr" json:"listenAddr"`
|
||||
Version *string `protobuf:"bytes,6,req,name=version" json:"version"`
|
||||
Other []string `protobuf:"bytes,7,rep,name=other" json:"other"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
@@ -148,7 +148,7 @@ func (m *NodeInfo) GetOther() []string {
|
||||
}
|
||||
|
||||
type ID struct {
|
||||
Id *string `protobuf:"bytes,1,req,name=id" json:"id,omitempty"`
|
||||
Id *string `protobuf:"bytes,1,req,name=id" json:"id"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
@@ -165,7 +165,7 @@ func (m *ID) GetId() string {
|
||||
}
|
||||
|
||||
type PubKey struct {
|
||||
Ed25519 *PubKeyEd25519 `protobuf:"bytes,1,opt,name=ed25519" json:"ed25519,omitempty"`
|
||||
Ed25519 *PubKeyEd25519 `protobuf:"bytes,1,opt,name=ed25519" json:"ed25519"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
@@ -182,7 +182,7 @@ func (m *PubKey) GetEd25519() *PubKeyEd25519 {
|
||||
}
|
||||
|
||||
type PubKeyEd25519 struct {
|
||||
Bytes []byte `protobuf:"bytes,1,req,name=bytes" json:"bytes,omitempty"`
|
||||
Bytes []byte `protobuf:"bytes,1,req,name=bytes" json:"bytes"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
|
@@ -637,7 +637,7 @@ type MempoolConfig struct {
|
||||
Size int `mapstructure:"size"`
|
||||
MaxTxsBytes int64 `mapstructure:"max_txs_bytes"`
|
||||
CacheSize int `mapstructure:"cache_size"`
|
||||
MaxMsgBytes int `mapstructure:"max_msg_bytes"`
|
||||
MaxTxBytes int `mapstructure:"max_tx_bytes"`
|
||||
}
|
||||
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
|
||||
@@ -651,7 +651,7 @@ func DefaultMempoolConfig() *MempoolConfig {
|
||||
Size: 5000,
|
||||
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
|
||||
CacheSize: 10000,
|
||||
MaxMsgBytes: 1024 * 1024, // 1MB
|
||||
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
}
|
||||
}
|
||||
|
||||
@@ -684,8 +684,8 @@ func (cfg *MempoolConfig) ValidateBasic() error {
|
||||
if cfg.CacheSize < 0 {
|
||||
return errors.New("cache_size can't be negative")
|
||||
}
|
||||
if cfg.MaxMsgBytes < 0 {
|
||||
return errors.New("max_msg_bytes can't be negative")
|
||||
if cfg.MaxTxBytes < 0 {
|
||||
return errors.New("max_tx_bytes can't be negative")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -294,8 +294,9 @@ max_txs_bytes = {{ .Mempool.MaxTxsBytes }}
|
||||
# Size of the cache (used to filter transactions we saw earlier) in transactions
|
||||
cache_size = {{ .Mempool.CacheSize }}
|
||||
|
||||
# Limit the size of TxMessage
|
||||
max_msg_bytes = {{ .Mempool.MaxMsgBytes }}
|
||||
# Maximum size of a single transaction.
|
||||
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
|
||||
max_tx_bytes = {{ .Mempool.MaxTxBytes }}
|
||||
|
||||
##### fast sync configuration options #####
|
||||
[fastsync]
|
||||
|
239
docs/architecture/adr-042-state-sync.md
Normal file
239
docs/architecture/adr-042-state-sync.md
Normal file
@@ -0,0 +1,239 @@
|
||||
# ADR 042: State Sync Design
|
||||
|
||||
## Changelog
|
||||
|
||||
2019-06-27: Init by EB
|
||||
2019-07-04: Follow up by brapse
|
||||
|
||||
## Context
|
||||
StateSync is a feature which would allow a new node to receive a
|
||||
snapshot of the application state without downloading blocks or going
|
||||
through consensus. Once downloaded, the node could switch to FastSync
|
||||
and eventually participate in consensus. The goal of StateSync is to
|
||||
facilitate setting up a new node as quickly as possible.
|
||||
|
||||
## Considerations
|
||||
Because Tendermint doesn't know anything about the application state,
|
||||
StateSync will broker messages between nodes and through
|
||||
the ABCI to an opaque application. The implementation will have multiple
|
||||
touch points on both the tendermint code base and ABCI application.
|
||||
|
||||
* A StateSync reactor to facilitate peer communication - Tendermint
|
||||
* A Set of ABCI messages to transmit application state to the reactor - Tendermint
|
||||
* A Set of MultiStore APIs for exposing snapshot data to the ABCI - ABCI application
|
||||
* A Storage format with validation and performance considerations - ABCI application
|
||||
|
||||
### Implementation Properties
|
||||
Beyond the approach, any implementation of StateSync can be evaluated
|
||||
across different criteria:
|
||||
|
||||
* Speed: Expected throughput of producing and consuming snapshots
|
||||
* Safety: Cost of pushing invalid snapshots to a node
|
||||
* Liveness: Cost of preventing a node from receiving/constructing a snapshot
|
||||
* Effort: How much effort does an implementation require
|
||||
|
||||
### Implementation Question
|
||||
* What is the format of a snapshot
|
||||
* Complete snapshot
|
||||
* Ordered IAVL key ranges
|
||||
* Individually compressed chunks which can be validated
|
||||
* How is data validated
|
||||
* Trust a peer with its data blindly
|
||||
* Trust a majority of peers
|
||||
* Use light client validation to validate each chunk against consensus
|
||||
produced merkle tree root
|
||||
* What are the performance characteristics
|
||||
* Random vs sequential reads
|
||||
* How parallelizable is the scheduling algorithm
|
||||
|
||||
### Proposals
|
||||
Broadly speaking there are two approaches to this problem which have had
|
||||
varying degrees of discussion and progress. These approach can be
|
||||
summarized as:
|
||||
|
||||
**Lazy:** Where snapshots are produced dynamically at request time. This
|
||||
solution would use the existing data structure.
|
||||
**Eager:** Where snapshots are produced periodically and served from disk at
|
||||
request time. This solution would create an auxiliary data structure
|
||||
optimized for batch read/writes.
|
||||
|
||||
Additionally the proposals tend to vary on how they provide safety
|
||||
properties.
|
||||
|
||||
**LightClient** Where a client can acquire the merkle root from the block
|
||||
headers synchronized from a trusted validator set. Subsets of the application state,
|
||||
called chunks can therefore be validated on receipt to ensure each chunk
|
||||
is part of the merkle root.
|
||||
|
||||
**Majority of Peers** Where manifests of chunks along with checksums are
|
||||
downloaded and compared against versions provided by a majority of
|
||||
peers.
|
||||
|
||||
#### Lazy StateSync
|
||||
An [initial specification](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) was published by Alexis Sellier.
|
||||
In this design, the state has a given `size` of primitive elements (like
|
||||
keys or nodes), each element is assigned a number from 0 to `size-1`,
|
||||
and chunks consists of a range of such elements. Ackratos raised
|
||||
[some concerns](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit)
|
||||
about this design, somewhat specific to the IAVL tree, and mainly concerning
|
||||
performance of random reads and of iterating through the tree to determine element numbers
|
||||
(ie. elements aren't indexed by the element number).
|
||||
|
||||
An alternative design was suggested by Jae Kwon in
|
||||
[#3639](https://github.com/tendermint/tendermint/issues/3639) where chunking
|
||||
happens lazily and in a dynamic way: nodes request key ranges from their peers,
|
||||
and peers respond with some subset of the
|
||||
requested range and with notes on how to request the rest in parallel from other
|
||||
peers. Unlike chunk numbers, keys can be verified directly. And if some keys in the
|
||||
range are omitted, proofs for the range will fail to verify.
|
||||
This way a node can start by requesting the entire tree from one peer,
|
||||
and that peer can respond with say the first few keys, and the ranges to request
|
||||
from other peers.
|
||||
|
||||
Additionally, per chunk validation tends to come more naturally to the
|
||||
Lazy approach since it tends to use the existing structure of the tree
|
||||
(ie. keys or nodes) rather than state-sync specific chunks. Such a
|
||||
design for tendermint was originally tracked in
|
||||
[#828](https://github.com/tendermint/tendermint/issues/828).
|
||||
|
||||
#### Eager StateSync
|
||||
Parity's
|
||||
["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) is used to rapidly
|
||||
download both blocks and state snapshots from peers. Data is carved into ~4MB
|
||||
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
|
||||
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
|
||||
file seems to require an honest majority of peers. This means you may not find
|
||||
out the state is incorrect until you download the whole thing and compare it
|
||||
with a verified block header.
|
||||
|
||||
A similar solution was implemented by Binance in
|
||||
[#3594](https://github.com/tendermint/tendermint/pull/3594)
|
||||
based on their initial implementation in
|
||||
[PR #3243](https://github.com/tendermint/tendermint/pull/3243)
|
||||
and [some learnings](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit).
|
||||
Note this still requires the honest majority peer assumption.
|
||||
|
||||
As an eager protocol, warp-sync can efficiently compress larger, more
|
||||
predictable chunks once per snapshot and service many new peers. By
|
||||
comparison lazy chunkers would have to compress each chunk at request
|
||||
time.
|
||||
|
||||
### Analysis of Lazy vs Eager
|
||||
Lazy vs Eager have more in common than they differ. They all require
|
||||
reactors on the tendermint side, a set of ABCI messages and a method for
|
||||
serializing/deserializing snapshots facilitated by a SnapshotFormat.
|
||||
|
||||
The biggest difference between Lazy and Eager proposals is in the
|
||||
read/write patterns necessitated by serving a snapshot chunk.
|
||||
Specifically, Lazy State Sync performs random reads to the underlying data
|
||||
structure while Eager can optimize for sequential reads.
|
||||
|
||||
This distinction between approaches was demonstrated by Binance's
|
||||
[ackratos](https://github.com/ackratos) in their implementation of [Lazy
|
||||
State sync](https://github.com/tendermint/tendermint/pull/3243). The
|
||||
[analysis](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/)
|
||||
of the performance, and follow up implementation of [Warp
|
||||
Sync](http://github.com/tendermint/tendermint/pull/3594).
|
||||
|
||||
#### Comparing Security Models
|
||||
There are several different security models which have been
|
||||
discussed/proposed in the past but generally fall into two categories.
|
||||
|
||||
Light client validation: In which the node receiving data is expected to
|
||||
first perform a light client sync and have all the necessary block
|
||||
headers. Within the trusted block header (trusted in terms of from a
|
||||
validator set subject to [weak
|
||||
subjectivity](https://github.com/tendermint/tendermint/pull/3795)) and
|
||||
can compare any subset of keys called a chunk against the merkle root.
|
||||
The advantage of light client validation is that the block headers are
|
||||
signed by validators which have something to lose for malicious
|
||||
behaviour. If a validator were to provide an invalid proof, they can be
|
||||
slashed.
|
||||
|
||||
Majority of peer validation: A manifest file containing a list of chunks
|
||||
along with checksums of each chunk is downloaded from a
|
||||
trusted source. That source can be a community resource similar to
|
||||
[sum.golang.org](https://sum.golang.org) or downloaded from the majority
|
||||
of peers. One disadvantage of the majority of peer security model is the
|
||||
vulnerability to eclipse attacks in which a malicious user looks to
|
||||
saturate a target node's peer list and produce a manufactured picture of
|
||||
majority.
|
||||
|
||||
A third option would be to include snapshot related data in the
|
||||
block header. This could include the manifest with related checksums and be
|
||||
secured through consensus. One challenge of this approach is to
|
||||
ensure that creating snapshots does not put undue burden on block
|
||||
proposers by synchronizing snapshot creation and block creation. One
|
||||
approach to minimizing the burden is for snapshots for height
|
||||
`H` to be included in block `H+n` where `n` is some `n` block away,
|
||||
giving the block proposer enough time to complete the snapshot
|
||||
asynchronously.
|
||||
|
||||
## Proposal: Eager StateSync With Per Chunk Light Client Validation
|
||||
The conclusion after some consideration of the advantages/disadvantages of
|
||||
eager/lazy and different security models is to produce a state sync
|
||||
which eagerly produces snapshots and uses light client validation. This
|
||||
approach has the performance advantages of pre-computing efficient
|
||||
snapshots which can be streamed to new nodes on demand using sequential IO.
|
||||
Secondly, by using light client validation we can validate each chunk on
|
||||
receipt and avoid the potential eclipse attack of majority of peer based
|
||||
security.
|
||||
|
||||
### Implementation
|
||||
Tendermint is responsible for downloading and verifying chunks of
|
||||
AppState from peers. ABCI Application is responsible for taking
|
||||
AppStateChunk objects from TM and constructing a valid state tree whose
|
||||
root corresponds with the AppHash of syncing block. In particular we
|
||||
will need to implement:
|
||||
|
||||
* Build new StateSync reactor brokers message transmission between the peers
|
||||
and the ABCI application
|
||||
* A set of ABCI Messages
|
||||
* Design SnapshotFormat as an interface which can:
|
||||
* validate chunks
|
||||
* read/write chunks from file
|
||||
* read/write chunks to/from application state store
|
||||
* convert manifests into chunkRequest ABCI messages
|
||||
* Implement SnapshotFormat for cosmos-hub with concrete implementation for:
|
||||
* read/write chunks in a way which can be:
|
||||
* parallelized across peers
|
||||
* validated on receipt
|
||||
* read/write to/from IAVL+ tree
|
||||
|
||||

|
||||
|
||||
## Implementation Path
|
||||
* Create StateSync reactor based on [#3753](https://github.com/tendermint/tendermint/pull/3753)
|
||||
* Design SnapshotFormat with an eye towards cosmos-hub implementation
|
||||
* ABCI message to send/receive SnapshotFormat
|
||||
* IAVL+ changes to support SnapshotFormat
|
||||
* Deliver Warp sync (no chunk validation)
|
||||
* light client implementation for weak subjectivity
|
||||
* Deliver StateSync with chunk validation
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
### Neutral
|
||||
|
||||
### Positive
|
||||
* Safe & performant state sync design substantiated with real world implementation experience
|
||||
* General interfaces allowing application specific innovation
|
||||
* Parallelizable implementation trajectory with reasonable engineering effort
|
||||
|
||||
### Negative
|
||||
* Static Scheduling lacks opportunity for real time chunk availability optimizations
|
||||
|
||||
## References
|
||||
[sync: Sync current state without full replay for Applications](https://github.com/tendermint/tendermint/issues/828) - original issue
|
||||
[tendermint state sync proposal](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) - Cloudhead proposal
|
||||
[tendermint state sync proposal 2](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) - ackratos proposal
|
||||
[proposal 2 implementation](https://github.com/tendermint/tendermint/pull/3243) - ackratos implementation
|
||||
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
|
||||
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
|
||||
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
|
||||
|
||||
|
BIN
docs/architecture/img/state-sync.png
Normal file
BIN
docs/architecture/img/state-sync.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 99 KiB |
@@ -240,8 +240,9 @@ max_txs_bytes = 1073741824
|
||||
# Size of the cache (used to filter transactions we saw earlier) in transactions
|
||||
cache_size = 10000
|
||||
|
||||
# Limit the size of TxMessage
|
||||
max_msg_bytes = 1048576
|
||||
# Maximum size of a single transaction.
|
||||
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
|
||||
max_tx_bytes = 1048576
|
||||
|
||||
##### fast sync configuration options #####
|
||||
[fastsync]
|
||||
|
@@ -232,8 +232,8 @@ func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), t
|
||||
// The size of the corresponding amino-encoded TxMessage
|
||||
// can't be larger than the maxMsgSize, otherwise we can't
|
||||
// relay it to peers.
|
||||
if max := calcMaxTxSize(mem.config.MaxMsgBytes); txSize > max {
|
||||
return ErrTxTooLarge{max, txSize}
|
||||
if txSize > mem.config.MaxTxBytes {
|
||||
return ErrTxTooLarge{mem.config.MaxTxBytes, txSize}
|
||||
}
|
||||
|
||||
if mem.preCheck != nil {
|
||||
|
@@ -426,8 +426,8 @@ func TestMempoolMaxMsgSize(t *testing.T) {
|
||||
mempl, cleanup := newMempoolWithApp(cc)
|
||||
defer cleanup()
|
||||
|
||||
maxMsgSize := mempl.config.MaxMsgBytes
|
||||
maxTxSize := calcMaxTxSize(mempl.config.MaxMsgBytes)
|
||||
maxTxSize := mempl.config.MaxTxBytes
|
||||
maxMsgSize := calcMaxMsgSize(maxTxSize)
|
||||
|
||||
testCases := []struct {
|
||||
len int
|
||||
|
@@ -263,8 +263,9 @@ func RegisterMempoolMessages(cdc *amino.Codec) {
|
||||
}
|
||||
|
||||
func (memR *Reactor) decodeMsg(bz []byte) (msg MempoolMessage, err error) {
|
||||
if l := len(bz); l > memR.config.MaxMsgBytes {
|
||||
return msg, ErrTxTooLarge{memR.config.MaxMsgBytes, l}
|
||||
maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
|
||||
if l := len(bz); l > maxMsgSize {
|
||||
return msg, ErrTxTooLarge{maxMsgSize, l}
|
||||
}
|
||||
err = cdc.UnmarshalBinaryBare(bz, &msg)
|
||||
return
|
||||
@@ -282,8 +283,8 @@ func (m *TxMessage) String() string {
|
||||
return fmt.Sprintf("[TxMessage %v]", m.Tx)
|
||||
}
|
||||
|
||||
// calcMaxTxSize returns the max size of Tx
|
||||
// calcMaxMsgSize returns the max size of TxMessage
|
||||
// account for amino overhead of TxMessage
|
||||
func calcMaxTxSize(maxMsgSize int) int {
|
||||
return maxMsgSize - aminoOverheadForTxMessage
|
||||
func calcMaxMsgSize(maxTxSize int) int {
|
||||
return maxTxSize + aminoOverheadForTxMessage
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package privval
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
@@ -160,7 +160,7 @@ func validatePage(page, perPage, totalCount int) (int, error) {
|
||||
pages = 1 // one page (even if it's empty)
|
||||
}
|
||||
if page < 0 || page > pages {
|
||||
return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page)
|
||||
return 1, fmt.Errorf("page should be within [0, %d] range, given %d", pages, page)
|
||||
}
|
||||
|
||||
return page, nil
|
||||
|
@@ -83,9 +83,9 @@ func (txs Txs) Proof(i int) TxProof {
|
||||
|
||||
// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
|
||||
type TxProof struct {
|
||||
RootHash cmn.HexBytes
|
||||
Data Tx
|
||||
Proof merkle.SimpleProof
|
||||
RootHash cmn.HexBytes `json:"root_hash"`
|
||||
Data Tx `json:"data"`
|
||||
Proof merkle.SimpleProof `json:"proof"`
|
||||
}
|
||||
|
||||
// Leaf returns the hash(tx), which is the leaf in the merkle tree which this proof refers to.
|
||||
|
Reference in New Issue
Block a user