Mirror of https://github.com/fluencelabs/tendermint, synced 2025-06-13 13:21:20 +00:00
rpc: add support for batched requests/responses (#3534)
Continues from #3280 in building support for batched requests/responses in the JSON RPC (as per issue #3213).

* Add JSON RPC batching for client and server

  As per #3213, this adds support for [JSON RPC batch requests and responses](https://www.jsonrpc.org/specification#batch).

* Add additional checks to ensure client responses are the same as results
* Fix case where a notification is sent and no response is expected
* Add test to check that JSON RPC notifications in a batch are left out of responses
* Update CHANGELOG_PENDING.md
* Update PR number now that PR has been created
* Make errors start with lowercase letter
* Refactor batch functionality to be standalone

  This refactors the batching functionality to act in a standalone way: to support concurrent goroutines using the same client, each goroutine can create its own batch of requests and send it without interfering with a batch from another goroutine.

* Add examples for simple and batch HTTP client usage
* Check errors from writer and remove nolint directives
* Make error strings start with lowercase letter
* Refactor examples to make them testable
* Use safer deferred shutdown for example Tendermint test node
* Recompose rpcClient interface from pre-existing interface components
* Rename WaitGroup for brevity
* Replace empty ID string with request ID
* Remove extraneous test case
* Convert first letter of errors.Wrap() messages to lowercase
* Remove extraneous function parameter
* Make variable declaration terse
* Reorder WaitGroup.Done call to help prevent race conditions in the face of failure
* Swap mutex to value representation and remove initialization
* Restore empty JSONRPC string ID in response to prevent nil
* Make JSONRPCBufferedRequest private
* Revert PR hard link in CHANGELOG_PENDING
* Add client ID for JSONRPCClient

  This automatically generates a randomized client ID for the JSONRPCClient and adds a check of the IDs in the responses (if one was set in the requests).

* Extract response ID validation into separate function
* Remove extraneous comments
* Reorder fields to indicate clearly which are protected by the mutex
* Refactor for loop to remove indexing
* Restructure and combine loop
* Flatten conditional block for better readability
* Make multi-variable declaration slightly more readable
* Change for loop style
* Compress error check statements
* Make function description more generic to show that we support different protocols
* Preallocate memory for request and result objects
Committed by: Anton Kaliaev
Parent: 621c0e629d
Commit: 90465f727f
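Before the diff, a minimal sketch of how the batching API described in the commit message might be used from application code, based only on the methods exercised by the tests below (NewBatch, BroadcastTxCommit, Count, Send). The client.NewHTTP constructor call, node address, and transaction payloads are illustrative assumptions, not taken from the PR's own examples:

package main

import (
	"fmt"
	"log"

	"github.com/tendermint/tendermint/rpc/client"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

func main() {
	// Assumed constructor and endpoint; check this release's client package for the exact signature.
	c := client.NewHTTP("tcp://localhost:26657", "/websocket")

	// A batch queues requests locally; nothing is sent until Send is called.
	batch := c.NewBatch()
	r1, err := batch.BroadcastTxCommit([]byte("k1=v1"))
	if err != nil {
		log.Fatal(err)
	}
	r2, err := batch.BroadcastTxCommit([]byte("k2=v2"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("queued requests:", batch.Count()) // 2

	// All queued requests go out together as a single JSON RPC batch request;
	// results come back in the same order the requests were queued.
	results, err := batch.Send()
	if err != nil {
		log.Fatal(err)
	}
	if br, ok := results[0].(*ctypes.ResultBroadcastTxCommit); ok {
		fmt.Println("first tx committed at height", br.Height)
	}

	// The pointers returned while queueing are also populated once Send returns
	// (the tests below assert they equal the corresponding entries in results).
	fmt.Println(r1.Height, r2.Height)
}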
@@ -4,6 +4,7 @@ import (
	"fmt"
	"net/http"
	"strings"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
@@ -11,7 +12,9 @@ import (

	abci "github.com/tendermint/tendermint/abci/types"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/rpc/client"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	rpctest "github.com/tendermint/tendermint/rpc/test"
	"github.com/tendermint/tendermint/types"
)
@@ -441,3 +444,100 @@ func TestTxSearch(t *testing.T) {
		require.Len(t, result.Txs, 0)
	}
}

func TestBatchedJSONRPCCalls(t *testing.T) {
	c := getHTTPClient()
	testBatchedJSONRPCCalls(t, c)
}

func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) {
	k1, v1, tx1 := MakeTxKV()
	k2, v2, tx2 := MakeTxKV()

	batch := c.NewBatch()
	r1, err := batch.BroadcastTxCommit(tx1)
	require.NoError(t, err)
	r2, err := batch.BroadcastTxCommit(tx2)
	require.NoError(t, err)
	require.Equal(t, 2, batch.Count())
	bresults, err := batch.Send()
	require.NoError(t, err)
	require.Len(t, bresults, 2)
	require.Equal(t, 0, batch.Count())

	bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit)
	require.True(t, ok)
	require.Equal(t, *bresult1, *r1)
	bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit)
	require.True(t, ok)
	require.Equal(t, *bresult2, *r2)
	apph := cmn.MaxInt64(bresult1.Height, bresult2.Height) + 1

	client.WaitForHeight(c, apph, nil)

	q1, err := batch.ABCIQuery("/key", k1)
	require.NoError(t, err)
	q2, err := batch.ABCIQuery("/key", k2)
	require.NoError(t, err)
	require.Equal(t, 2, batch.Count())
	qresults, err := batch.Send()
	require.NoError(t, err)
	require.Len(t, qresults, 2)
	require.Equal(t, 0, batch.Count())

	qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery)
	require.True(t, ok)
	require.Equal(t, *qresult1, *q1)
	qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery)
	require.True(t, ok)
	require.Equal(t, *qresult2, *q2)

	require.Equal(t, qresult1.Response.Key, k1)
	require.Equal(t, qresult2.Response.Key, k2)
	require.Equal(t, qresult1.Response.Value, v1)
	require.Equal(t, qresult2.Response.Value, v2)
}

func TestBatchedJSONRPCCallsCancellation(t *testing.T) {
	c := getHTTPClient()
	_, _, tx1 := MakeTxKV()
	_, _, tx2 := MakeTxKV()

	batch := c.NewBatch()
	_, err := batch.BroadcastTxCommit(tx1)
	require.NoError(t, err)
	_, err = batch.BroadcastTxCommit(tx2)
	require.NoError(t, err)
	// we should have 2 requests waiting
	require.Equal(t, 2, batch.Count())
	// we want to make sure we cleared 2 pending requests
	require.Equal(t, 2, batch.Clear())
	// now there should be no batched requests
	require.Equal(t, 0, batch.Count())
}

func TestSendingEmptyJSONRPCRequestBatch(t *testing.T) {
	c := getHTTPClient()
	batch := c.NewBatch()
	_, err := batch.Send()
	require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error")
}

func TestClearingEmptyJSONRPCRequestBatch(t *testing.T) {
	c := getHTTPClient()
	batch := c.NewBatch()
	require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result")
}

func TestConcurrentJSONRPCBatching(t *testing.T) {
	var wg sync.WaitGroup
	c := getHTTPClient()
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			testBatchedJSONRPCCalls(t, c)
		}()
	}
	wg.Wait()
}
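The TestConcurrentJSONRPCBatching case above exercises the design decision highlighted in the commit message: a batch is a standalone object, so each goroutine can build and send its own batch over a shared client without interfering with the others. A sketch of that pattern outside the test harness, with the same caveat that the constructor, endpoint, and payloads are assumptions:

package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// Assumed constructor and endpoint, as in the earlier sketch.
	c := client.NewHTTP("tcp://localhost:26657", "/websocket")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each goroutine owns its batch; only the HTTP client is shared.
			batch := c.NewBatch()
			if _, err := batch.BroadcastTxCommit([]byte(fmt.Sprintf("key%d=value%d", i, i))); err != nil {
				log.Println(err)
				return
			}
			if _, err := batch.Send(); err != nil {
				log.Println(err)
			}
		}(i)
	}
	wg.Wait()
}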