package mempool

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	mrand "math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

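// newMempoolWithApp creates a CListMempool backed by the given app, using a
// fresh "mempool_test" config root. The returned cleanupFunc removes the
// test root directory.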
func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) {
	return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
}

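// newMempoolWithAppAndConfig starts an ABCI mempool connection to the app and
// wires it into a new CListMempool using the given config. The cleanupFunc
// removes config.RootDir.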
func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) {
	appConnMem, _ := cc.NewABCIClient()
	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
	err := appConnMem.Start()
	if err != nil {
		panic(err)
	}
	mempool := NewCListMempool(config.Mempool, appConnMem, 0)
	mempool.SetLogger(log.TestingLogger())
	return mempool, func() { os.RemoveAll(config.RootDir) }
}

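// ensureNoFire fails the test if ch fires within timeoutMS milliseconds.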
func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
		t.Fatal("Expected not to fire")
	case <-timer.C:
	}
}

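// ensureFire fails the test if ch does not fire within timeoutMS milliseconds.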
func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) {
	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
	select {
	case <-ch:
	case <-timer.C:
		t.Fatal("Expected to fire")
	}
}

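// checkTxs submits count random 20-byte txs to the mempool via
// CheckTxWithInfo, attributing them to peerID, and returns them. Txs rejected
// by a pre-check filter are skipped.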
func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs {
	txs := make(types.Txs, count)
	txInfo := TxInfo{SenderID: peerID}
	for i := 0; i < count; i++ {
		txBytes := make([]byte, 20)
		txs[i] = txBytes
		_, err := rand.Read(txBytes)
		if err != nil {
			t.Error(err)
		}
		if err := mempool.CheckTxWithInfo(txBytes, nil, txInfo); err != nil {
			// Skip invalid txs.
			// TestMempoolFilters will fail otherwise. It asserts the number of txs
			// returned.
			if IsPreCheckError(err) {
				continue
			}
			t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i)
		}
	}
	return txs
}

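// TestReapMaxBytesMaxGas checks that ReapMaxBytesMaxGas honours both the byte
// and the gas limits across the table of limit combinations below. Per the
// table's comment, each tx encodes to 21 bytes (20 bytes + amino overhead) and
// costs 1 gas, so e.g. maxBytes=22 admits exactly one tx.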
func TestReapMaxBytesMaxGas(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// Ensure gas calculation behaves as expected
	checkTxs(t, mempool, 1, UnknownPeerID)
	tx0 := mempool.TxsFront().Value.(*mempoolTx)
	// assert that kv store has gas wanted = 1.
	require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value not equal to 1")
	require.Equal(t, tx0.gasWanted, int64(1), "transaction's gas was set incorrectly")
	// ensure each tx is 20 bytes long
	require.Equal(t, len(tx0.tx), 20, "Tx is not 20 bytes long")
	mempool.Flush()

	// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
	// each tx has 20 bytes + amino overhead = 21 bytes, 1 gas
	tests := []struct {
		numTxsToCreate int
		maxBytes       int64
		maxGas         int64
		expectedNumTxs int
	}{
		{20, -1, -1, 20},
		{20, -1, 0, 0},
		{20, -1, 10, 10},
		{20, -1, 30, 20},
		{20, 0, -1, 0},
		{20, 0, 10, 0},
		{20, 10, 10, 0},
		{20, 22, 10, 1},
		{20, 220, -1, 10},
		{20, 220, 5, 5},
		{20, 220, 10, 10},
		{20, 220, 15, 10},
		{20, 20000, -1, 20},
		{20, 20000, 5, 5},
		{20, 20000, 30, 20},
	}
	for tcIndex, tt := range tests {
		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
		got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas)
		assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d",
			len(got), tt.expectedNumTxs, tcIndex)
		mempool.Flush()
	}
}

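// TestMempoolFilters checks that the PreCheckFunc and PostCheckFunc passed to
// Update control which txs are admitted by CheckTx.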
func TestMempoolFilters(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	emptyTxArr := []types.Tx{[]byte{}}

	nopPreFilter := func(tx types.Tx) error { return nil }
	nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil }

	// each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs.
	// each tx has 20 bytes + amino overhead = 21 bytes, 1 gas
	tests := []struct {
		numTxsToCreate int
		preFilter      PreCheckFunc
		postFilter     PostCheckFunc
		expectedNumTxs int
	}{
		{10, nopPreFilter, nopPostFilter, 10},
		{10, PreCheckAminoMaxBytes(10), nopPostFilter, 0},
		{10, PreCheckAminoMaxBytes(20), nopPostFilter, 0},
		{10, PreCheckAminoMaxBytes(22), nopPostFilter, 10},
		{10, nopPreFilter, PostCheckMaxGas(-1), 10},
		{10, nopPreFilter, PostCheckMaxGas(0), 0},
		{10, nopPreFilter, PostCheckMaxGas(1), 10},
		{10, nopPreFilter, PostCheckMaxGas(3000), 10},
		{10, PreCheckAminoMaxBytes(10), PostCheckMaxGas(20), 0},
		{10, PreCheckAminoMaxBytes(30), PostCheckMaxGas(20), 10},
		{10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(1), 10},
		{10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0},
	}
	for tcIndex, tt := range tests {
		mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter)
		checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID)
		require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex)
		mempool.Flush()
	}
}

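// TestMempoolUpdate checks that Update caches committed txs, removes them
// from the mempool, and drops invalid txs from the cache so they can be
// resubmitted.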
func TestMempoolUpdate(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	// 1. Adds valid txs to the cache
	{
		mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		err := mempool.CheckTx([]byte{0x01}, nil)
		if assert.Error(t, err) {
			assert.Equal(t, ErrTxInCache, err)
		}
	}

	// 2. Removes valid txs from the mempool
	{
		err := mempool.CheckTx([]byte{0x02}, nil)
		require.NoError(t, err)
		mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
		assert.Zero(t, mempool.Size())
	}

	// 3. Removes invalid transactions from the cache and the mempool (if present)
	{
		err := mempool.CheckTx([]byte{0x03}, nil)
		require.NoError(t, err)
		mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil)
		assert.Zero(t, mempool.Size())

		err = mempool.CheckTx([]byte{0x03}, nil)
		assert.NoError(t, err)
	}
}

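// TestTxsAvailable checks that the TxsAvailable channel fires once per height
// when the mempool holds txs, and does not fire when it is empty.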
func TestTxsAvailable(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()
	mempool.EnableTxsAvailable()

	timeoutMS := 500

	// with no txs, it shouldn't fire
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch of txs, it should only fire once
	txs := checkTxs(t, mempool, 100, UnknownPeerID)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// call update with half the txs.
	// it should fire once now for the new height
	// since there are still txs left
	committedTxs, txs := txs[:50], txs[50:]
	if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch more txs. we already fired for this height so it shouldn't fire again
	moreTxs := checkTxs(t, mempool, 50, UnknownPeerID)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// now call update with all the txs. it should not fire as there are no txs left
	committedTxs = append(txs, moreTxs...) //nolint: gocritic
	if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil {
		t.Error(err)
	}
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)

	// send a bunch more txs, it should only fire once
	checkTxs(t, mempool, 100, UnknownPeerID)
	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
}

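// TestSerialReap runs CheckTx, reaping, commits and Update against the counter
// app in "serial" mode, verifying that the cache rejects duplicate txs and
// that reap counts stay consistent across updates.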
func TestSerialReap(t *testing.T) {
	app := counter.NewCounterApplication(true)
	app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"})
	cc := proxy.NewLocalClientCreator(app)

	mempool, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err := appConnCon.Start()
	require.Nil(t, err)

	cacheMap := make(map[string]struct{})
	deliverTxsRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {

			// This will succeed
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			err := mempool.CheckTx(txBytes, nil)
			_, cached := cacheMap[string(txBytes)]
			if cached {
				require.NotNil(t, err, "expected error for cached tx")
			} else {
				require.Nil(t, err, "expected no err for uncached tx")
			}
			cacheMap[string(txBytes)] = struct{}{}

			// Duplicates are cached and should return error
			err = mempool.CheckTx(txBytes, nil)
			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
		}
	}

	reapCheck := func(exp int) {
		txs := mempool.ReapMaxBytesMaxGas(-1, -1)
		require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs)))
	}

	updateRange := func(start, end int) {
		txs := make([]types.Tx, 0)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			txs = append(txs, txBytes)
		}
		if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil {
			t.Error(err)
		}
	}

	commitRange := func(start, end int) {
		// Deliver some txs.
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("Client error committing tx: %v", err)
			}
			if res.IsErr() {
				t.Errorf("Error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
			}
		}
		res, err := appConnCon.CommitSync()
		if err != nil {
			t.Errorf("Client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("Error committing. Hash:%X", res.Data)
		}
	}

	//----------------------------------------

	// Deliver some txs.
	deliverTxsRange(0, 100)

	// Reap the txs.
	reapCheck(100)

	// Reap again. We should get the same amount
	reapCheck(100)

	// Deliver 0 to 999, we should reap 900 new txs
	// because 100 were already counted.
	deliverTxsRange(0, 1000)

	// Reap the txs.
	reapCheck(1000)

	// Reap again. We should get the same amount
	reapCheck(1000)

	// Commit from the consensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

	// We should have 500 left.
	reapCheck(500)

	// Deliver 100 invalid txs and 100 valid txs
	deliverTxsRange(900, 1100)

	// We should have 600 now.
	reapCheck(600)
}

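// TestMempoolCloseWAL checks that the mempool WAL is created by InitWAL,
// written to on CheckTx, and no longer written to after CloseWAL.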
func TestMempoolCloseWAL(t *testing.T) {
	// 1. Create the temporary directory for mempool and WAL testing.
	rootDir, err := ioutil.TempDir("", "mempool-test")
	require.Nil(t, err, "expecting successful tmpdir creation")

	// 2. Ensure that it doesn't contain any elements -- Sanity check
	m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 0, len(m1), "no matches yet")

	// 3. Create the mempool
	wcfg := cfg.DefaultConfig()
	wcfg.Mempool.RootDir = rootDir
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg)
	defer cleanup()
	mempool.height = 10
	mempool.InitWAL()

	// 4. Ensure that the directory contains the WAL file
	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m2), "expecting the WAL file to be created")

	// 5. Write some contents to the WAL
	mempool.CheckTx(types.Tx([]byte("foo")), nil)
	walFilepath := mempool.wal.Path
	sum1 := checksumFile(walFilepath, t)

	// 6. Sanity check to ensure that the written TX matches the expectation.
	require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")

	// 7. Invoke CloseWAL() and ensure it discards the WAL,
	// so any further writes won't go through.
	mempool.CloseWAL()
	mempool.CheckTx(types.Tx([]byte("bar")), nil)
	sum2 := checksumFile(walFilepath, t)
	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

	// 8. Sanity check to ensure that the WAL file still exists
	m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
	require.Nil(t, err, "successful globbing expected")
	require.Equal(t, 1, len(m3), "expecting the WAL file to still exist")
}

// Size of the amino encoded TxMessage is the length of the
// encoded byte array, plus 1 for the struct field, plus 4
// for the amino prefix.
func txMessageSize(tx types.Tx) int {
	return amino.ByteSliceSize(tx) + 1 + 4
}

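// TestMempoolMaxMsgSize checks that txs larger than the configured MaxTxBytes
// are rejected by CheckTx with ErrTxTooLarge, while smaller txs are accepted.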
func TestMempoolMaxMsgSize(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempl, cleanup := newMempoolWithApp(cc)
	defer cleanup()

	maxTxSize := mempl.config.MaxTxBytes
	maxMsgSize := calcMaxMsgSize(maxTxSize)

	testCases := []struct {
		len int
		err bool
	}{
		// check small txs. no error
		{10, false},
		{1000, false},
		{1000000, false},

		// check around maxTxSize
		// changes from no error to error
		{maxTxSize - 2, false},
		{maxTxSize - 1, false},
		{maxTxSize, false},
		{maxTxSize + 1, true},
		{maxTxSize + 2, true},

		// check around maxMsgSize. all error
		{maxMsgSize - 1, true},
		{maxMsgSize, true},
		{maxMsgSize + 1, true},
	}

	for i, testCase := range testCases {
		caseString := fmt.Sprintf("case %d, len %d", i, testCase.len)

		tx := cmn.RandBytes(testCase.len)
		err := mempl.CheckTx(tx, nil)
		msg := &TxMessage{tx}
		encoded := cdc.MustMarshalBinaryBare(msg)
		require.Equal(t, len(encoded), txMessageSize(tx), caseString)
		if !testCase.err {
			require.True(t, len(encoded) <= maxMsgSize, caseString)
			require.NoError(t, err, caseString)
		} else {
			require.True(t, len(encoded) > maxMsgSize, caseString)
			require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString)
		}
	}
}

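// TestMempoolTxsBytes checks that TxsBytes tracks the total size of txs in the
// mempool across CheckTx, Update, Flush and recheck, and that ErrMempoolIsFull
// is returned once the MaxTxsBytes limit is reached.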
func TestMempoolTxsBytes(t *testing.T) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	config := cfg.ResetTestRoot("mempool_test")
	config.Mempool.MaxTxsBytes = 10
	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// 1. zero by default
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 2. len(tx) after CheckTx
	err := mempool.CheckTx([]byte{0x01}, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 1, mempool.TxsBytes())

	// 3. zero again after tx is removed by Update
	mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil)
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 4. zero after Flush
	err = mempool.CheckTx([]byte{0x02, 0x03}, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 2, mempool.TxsBytes())

	mempool.Flush()
	assert.EqualValues(t, 0, mempool.TxsBytes())

	// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
	err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil)
	require.NoError(t, err)
	err = mempool.CheckTx([]byte{0x05}, nil)
	if assert.Error(t, err) {
		assert.IsType(t, ErrMempoolIsFull{}, err)
	}

	// 6. zero after tx is rechecked and removed due to not being valid anymore
	app2 := counter.NewCounterApplication(true)
	cc = proxy.NewLocalClientCreator(app2)
	mempool, cleanup = newMempoolWithApp(cc)
	defer cleanup()

	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	err = mempool.CheckTx(txBytes, nil)
	require.NoError(t, err)
	assert.EqualValues(t, 8, mempool.TxsBytes())

	appConnCon, _ := cc.NewABCIClient()
	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
	err = appConnCon.Start()
	require.Nil(t, err)
	defer appConnCon.Stop()
	res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	res2, err := appConnCon.CommitSync()
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)

	// Pretend like we committed nothing so txBytes gets rechecked and removed.
	mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
	assert.EqualValues(t, 0, mempool.TxsBytes())
}

// This will non-deterministically catch some concurrency failures like
// https://github.com/tendermint/tendermint/issues/3509
// TODO: all of the tests should probably also run using the remote proxy app
// since otherwise we're not actually testing the concurrency of the mempool here!
func TestMempoolRemoteAppConcurrency(t *testing.T) {
	sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6))
	app := kvstore.NewKVStoreApplication()
	cc, server := newRemoteApp(t, sockPath, app)
	defer server.Stop()
	config := cfg.ResetTestRoot("mempool_test")
	mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
	defer cleanup()

	// generate a small number of txs
	nTxs := 10
	txLen := 200
	txs := make([]types.Tx, nTxs)
	for i := 0; i < nTxs; i++ {
		txs[i] = cmn.RandBytes(txLen)
	}

	// simulate a group of peers sending them over and over
	N := config.Mempool.Size
	maxPeers := 5
	for i := 0; i < N; i++ {
		peerID := mrand.Intn(maxPeers)
		txNum := mrand.Intn(nTxs)
		tx := txs[txNum]

		// this will err with ErrTxInCache many times ...
		mempool.CheckTxWithInfo(tx, nil, TxInfo{SenderID: uint16(peerID)})
	}
	err := mempool.FlushAppConn()
	require.NoError(t, err)
}

// newRemoteApp starts an ABCI socket server for app at addr and returns a
// client creator for it; the caller must stop the server.
func newRemoteApp(t *testing.T, addr string, app abci.Application) (clientCreator proxy.ClientCreator, server cmn.Service) {
	clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)

	// Start server
	server = abciserver.NewSocketServer(addr, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	if err := server.Start(); err != nil {
		t.Fatalf("Error starting socket server: %v", err.Error())
	}
	return clientCreator, server
}

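// checksumIt returns the hex-encoded SHA-256 checksum of data.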
func checksumIt(data []byte) string {
	h := sha256.New()
	h.Write(data)
	return fmt.Sprintf("%x", h.Sum(nil))
}

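// checksumFile returns the hex-encoded SHA-256 checksum of the file at p.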
func checksumFile(p string, t *testing.T) string {
	data, err := ioutil.ReadFile(p)
	require.Nil(t, err, "expecting successful read of %q", p)
	return checksumIt(data)
}

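// abciResponses returns n DeliverTx responses, all carrying the given code.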
func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
	responses := make([]*abci.ResponseDeliverTx, 0, n)
	for i := 0; i < n; i++ {
		responses = append(responses, &abci.ResponseDeliverTx{Code: code})
	}
	return responses
}