Mirror of https://github.com/fluencelabs/tendermint (synced 2025-06-20 00:21:21 +00:00)
limit number of /subscribe clients and queries per client (#3269)
* limit number of /subscribe clients and queries per client

Add the following config variables (under the [rpc] section):

* max_subscription_clients
* max_subscriptions_per_client
* timeout_broadcast_tx_commit

Fixes #2826

new HTTPClient interface for subscriptions
finalize HTTPClient events interface
remove EventSubscriber
fix data race:

```
WARNING: DATA RACE
Read at 0x00c000a36060 by goroutine 129:
  github.com/tendermint/tendermint/rpc/client.(*Local).Subscribe.func1()
      /go/src/github.com/tendermint/tendermint/rpc/client/localclient.go:168 +0x1f0

Previous write at 0x00c000a36060 by goroutine 132:
  github.com/tendermint/tendermint/rpc/client.(*Local).Subscribe()
      /go/src/github.com/tendermint/tendermint/rpc/client/localclient.go:191 +0x4e0
  github.com/tendermint/tendermint/rpc/client.WaitForOneEvent()
      /go/src/github.com/tendermint/tendermint/rpc/client/helpers.go:64 +0x178
  github.com/tendermint/tendermint/rpc/client_test.TestTxEventsSentWithBroadcastTxSync.func1()
      /go/src/github.com/tendermint/tendermint/rpc/client/event_test.go:139 +0x298
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:827 +0x162

Goroutine 129 (running) created at:
  github.com/tendermint/tendermint/rpc/client.(*Local).Subscribe()
      /go/src/github.com/tendermint/tendermint/rpc/client/localclient.go:164 +0x4b7
  github.com/tendermint/tendermint/rpc/client.WaitForOneEvent()
      /go/src/github.com/tendermint/tendermint/rpc/client/helpers.go:64 +0x178
  github.com/tendermint/tendermint/rpc/client_test.TestTxEventsSentWithBroadcastTxSync.func1()
      /go/src/github.com/tendermint/tendermint/rpc/client/event_test.go:139 +0x298
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:827 +0x162

Goroutine 132 (running) created at:
  testing.(*T).Run()
      /usr/local/go/src/testing/testing.go:878 +0x659
  github.com/tendermint/tendermint/rpc/client_test.TestTxEventsSentWithBroadcastTxSync()
      /go/src/github.com/tendermint/tendermint/rpc/client/event_test.go:119 +0x186
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:827 +0x162
==================
```

lite client works (tested manually)
godoc comments
httpclient: do not close the out channel
use TimeoutBroadcastTxCommit
no timeout for unsubscribe, but 1s Local (5s HTTP) timeout for resubscribe
format code
change Subscribe#out cap to 1 and replace config vars with RPCConfig
TimeoutBroadcastTxCommit can't be greater than rpcserver.WriteTimeout
rpc: Context as first parameter to all functions
reformat code
fixes after my own review
fixes after Ethan's review
add test stubs
fix config.toml

* fixes after manual testing

- rpc: do not recommend to use BroadcastTxCommit because it's slow and wastes Tendermint resources (pubsub)
- rpc: better error in Subscribe and BroadcastTxCommit
- HTTPClient: do not resubscribe if err = ErrAlreadySubscribed

* fixes after Ismail's review

* Update rpc/grpc/grpc_test.go

Co-Authored-By: melekes <anton.kalyaev@gmail.com>
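The new `[rpc]` keys above can also be set programmatically. Below is a minimal sketch that assumes the Go fields on `RPCConfig` mirror the TOML keys (`MaxSubscriptionClients`, `MaxSubscriptionsPerClient`, `TimeoutBroadcastTxCommit`); the field names and values are illustrative assumptions, not taken from this commit.

```go
package example

import (
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

// subscriptionLimits sketches how the new [rpc] limits might be set in code.
// NOTE: the RPC field names below are assumptions that mirror the TOML keys
// (max_subscription_clients, max_subscriptions_per_client,
// timeout_broadcast_tx_commit); the values are illustrative only.
func subscriptionLimits() *cfg.Config {
	c := cfg.DefaultConfig()
	c.RPC.MaxSubscriptionClients = 100  // limit number of /subscribe clients
	c.RPC.MaxSubscriptionsPerClient = 5 // limit queries per client
	// Per the commit message, this must not exceed rpcserver.WriteTimeout.
	c.RPC.TimeoutBroadcastTxCommit = 10 * time.Second
	return c
}
```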
rpc/client/localclient.go:

@@ -2,12 +2,18 @@ package client
 
 import (
 	"context"
+	"time"
+
+	"github.com/pkg/errors"
 
 	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
+	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
 	nm "github.com/tendermint/tendermint/node"
 	"github.com/tendermint/tendermint/rpc/core"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
+	rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -24,9 +30,17 @@ are compiled in process.
 
 For real clients, you probably want to use client.HTTP. For more
 powerful control during testing, you probably want the "client/mock" package.
+
+You can subscribe for any event published by Tendermint using Subscribe method.
+Note delivery is best-effort. If you don't read events fast enough, Tendermint
+might cancel the subscription. The client will attempt to resubscribe (you
+don't need to do anything). It will keep trying indefinitely with exponential
+backoff (10ms -> 20ms -> 40ms) until successful.
 */
 type Local struct {
 	*types.EventBus
+	Logger log.Logger
+	ctx    *rpctypes.Context
 }
 
 // NewLocal configures a client that calls the Node directly.
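The comment block above promises best-effort delivery with automatic resubscription. Here is a compilable usage sketch of the string-based `Subscribe` added in the next hunk; the subscriber name and query are illustrative examples, not values from this change.

```go
package example

import (
	"context"
	"fmt"
	"time"

	nm "github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/rpc/client"
)

// watchNewBlocks is an illustrative sketch: it assumes a running *nm.Node;
// the subscriber name and query string are examples, not values from this diff.
func watchNewBlocks(node *nm.Node) error {
	c := client.NewLocal(node)

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Subscribe now takes the query as a plain string and returns a channel of
	// ctypes.ResultEvent. Delivery is best-effort; the client resubscribes on
	// its own if Tendermint cancels the subscription.
	out, err := c.Subscribe(ctx, "example-subscriber", "tm.event = 'NewBlock'")
	if err != nil {
		return err
	}
	defer c.Unsubscribe(context.Background(), "example-subscriber", "tm.event = 'NewBlock'")

	for {
		select {
		case ev := <-out:
			fmt.Println("event for query:", ev.Query)
		case <-ctx.Done():
			return nil
		}
	}
}
```

Passing an explicit `outCapacity` of 0 yields an unbuffered channel, in which case sends from the client block until the caller reads; the default capacity is 1.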
@@ -39,113 +53,189 @@ func NewLocal(node *nm.Node) *Local {
 	node.ConfigureRPC()
 	return &Local{
 		EventBus: node.EventBus(),
+		Logger:   log.NewNopLogger(),
+		ctx:      &rpctypes.Context{},
 	}
 }
 
 var (
 	_ Client        = (*Local)(nil)
-	_ NetworkClient = Local{}
+	_ NetworkClient = (*Local)(nil)
 	_ EventsClient  = (*Local)(nil)
 )
 
-func (Local) Status() (*ctypes.ResultStatus, error) {
-	return core.Status()
+// SetLogger allows to set a logger on the client.
+func (c *Local) SetLogger(l log.Logger) {
+	c.Logger = l
 }
 
-func (Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
-	return core.ABCIInfo()
+func (c *Local) Status() (*ctypes.ResultStatus, error) {
+	return core.Status(c.ctx)
 }
 
+func (c *Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
+	return core.ABCIInfo(c.ctx)
+}
+
 func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
 	return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions)
 }
 
-func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
-	return core.ABCIQuery(path, data, opts.Height, opts.Prove)
+func (c *Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
+	return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
 }
 
-func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
-	return core.BroadcastTxCommit(tx)
+func (c *Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
+	return core.BroadcastTxCommit(c.ctx, tx)
 }
 
-func (Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	return core.BroadcastTxAsync(tx)
+func (c *Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+	return core.BroadcastTxAsync(c.ctx, tx)
 }
 
-func (Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	return core.BroadcastTxSync(tx)
+func (c *Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
+	return core.BroadcastTxSync(c.ctx, tx)
 }
 
-func (Local) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
-	return core.UnconfirmedTxs(limit)
+func (c *Local) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
+	return core.UnconfirmedTxs(c.ctx, limit)
 }
 
-func (Local) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
-	return core.NumUnconfirmedTxs()
+func (c *Local) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
+	return core.NumUnconfirmedTxs(c.ctx)
 }
 
-func (Local) NetInfo() (*ctypes.ResultNetInfo, error) {
-	return core.NetInfo()
+func (c *Local) NetInfo() (*ctypes.ResultNetInfo, error) {
+	return core.NetInfo(c.ctx)
 }
 
-func (Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
-	return core.DumpConsensusState()
+func (c *Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
+	return core.DumpConsensusState(c.ctx)
 }
 
-func (Local) ConsensusState() (*ctypes.ResultConsensusState, error) {
-	return core.ConsensusState()
+func (c *Local) ConsensusState() (*ctypes.ResultConsensusState, error) {
+	return core.ConsensusState(c.ctx)
 }
 
-func (Local) Health() (*ctypes.ResultHealth, error) {
-	return core.Health()
+func (c *Local) Health() (*ctypes.ResultHealth, error) {
+	return core.Health(c.ctx)
 }
 
-func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
-	return core.UnsafeDialSeeds(seeds)
+func (c *Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
+	return core.UnsafeDialSeeds(c.ctx, seeds)
 }
 
-func (Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
-	return core.UnsafeDialPeers(peers, persistent)
+func (c *Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
+	return core.UnsafeDialPeers(c.ctx, peers, persistent)
 }
 
-func (Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
-	return core.BlockchainInfo(minHeight, maxHeight)
+func (c *Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
+	return core.BlockchainInfo(c.ctx, minHeight, maxHeight)
 }
 
-func (Local) Genesis() (*ctypes.ResultGenesis, error) {
-	return core.Genesis()
+func (c *Local) Genesis() (*ctypes.ResultGenesis, error) {
+	return core.Genesis(c.ctx)
 }
 
-func (Local) Block(height *int64) (*ctypes.ResultBlock, error) {
-	return core.Block(height)
+func (c *Local) Block(height *int64) (*ctypes.ResultBlock, error) {
+	return core.Block(c.ctx, height)
 }
 
-func (Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) {
-	return core.BlockResults(height)
+func (c *Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) {
+	return core.BlockResults(c.ctx, height)
 }
 
-func (Local) Commit(height *int64) (*ctypes.ResultCommit, error) {
-	return core.Commit(height)
+func (c *Local) Commit(height *int64) (*ctypes.ResultCommit, error) {
+	return core.Commit(c.ctx, height)
 }
 
-func (Local) Validators(height *int64) (*ctypes.ResultValidators, error) {
-	return core.Validators(height)
+func (c *Local) Validators(height *int64) (*ctypes.ResultValidators, error) {
+	return core.Validators(c.ctx, height)
 }
 
-func (Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
-	return core.Tx(hash, prove)
+func (c *Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
+	return core.Tx(c.ctx, hash, prove)
 }
 
-func (Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) {
-	return core.TxSearch(query, prove, page, perPage)
+func (c *Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) {
+	return core.TxSearch(c.ctx, query, prove, page, perPage)
 }
 
-func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (types.Subscription, error) {
-	return c.EventBus.Subscribe(ctx, subscriber, query, outCapacity...)
+func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
+	q, err := tmquery.New(query)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse query")
+	}
+	sub, err := c.EventBus.Subscribe(ctx, subscriber, q)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to subscribe")
+	}
+
+	outCap := 1
+	if len(outCapacity) > 0 {
+		outCap = outCapacity[0]
+	}
+
+	outc := make(chan ctypes.ResultEvent, outCap)
+	go c.eventsRoutine(sub, subscriber, q, outc)
+
+	return outc, nil
 }
 
-func (c *Local) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error {
-	return c.EventBus.Unsubscribe(ctx, subscriber, query)
+func (c *Local) eventsRoutine(sub types.Subscription, subscriber string, q tmpubsub.Query, outc chan<- ctypes.ResultEvent) {
+	for {
+		select {
+		case msg := <-sub.Out():
+			result := ctypes.ResultEvent{Query: q.String(), Data: msg.Data(), Tags: msg.Tags()}
+			if cap(outc) == 0 {
+				outc <- result
+			} else {
+				select {
+				case outc <- result:
+				default:
+					c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query)
+				}
+			}
+		case <-sub.Cancelled():
+			if sub.Err() == tmpubsub.ErrUnsubscribed {
+				return
+			}
+
+			c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String())
+			sub = c.resubscribe(subscriber, q)
+			if sub == nil { // client was stopped
+				return
+			}
+		case <-c.Quit():
+			return
+		}
+	}
+}
+
+// Try to resubscribe with exponential backoff.
+func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription {
+	attempts := 0
+	for {
+		if !c.IsRunning() {
+			return nil
+		}
+
+		sub, err := c.EventBus.Subscribe(context.Background(), subscriber, q)
+		if err == nil {
+			return sub
+		}
+
+		attempts++
+		time.Sleep((10 << uint(attempts)) * time.Millisecond) // 10ms -> 20ms -> 40ms
+	}
+}
+
+func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error {
+	q, err := tmquery.New(query)
+	if err != nil {
+		return errors.Wrap(err, "failed to parse query")
+	}
+	return c.EventBus.Unsubscribe(ctx, subscriber, q)
 }
 
 func (c *Local) UnsubscribeAll(ctx context.Context, subscriber string) error {
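The `eventsRoutine` added above only blocks when the out channel is unbuffered; with a buffered channel it drops the event and logs an error once the buffer is full. A self-contained sketch of that send pattern, using plain strings instead of `ctypes.ResultEvent`:

```go
package main

import "fmt"

// trySend mirrors the send logic in eventsRoutine: block on an unbuffered
// channel, but never block on a buffered one -- drop the value instead.
func trySend(out chan<- string, v string) {
	if cap(out) == 0 {
		out <- v // unbuffered: block until the reader is ready
		return
	}
	select {
	case out <- v:
	default:
		fmt.Println("out channel is full, dropping:", v)
	}
}

func main() {
	buffered := make(chan string, 1)
	trySend(buffered, "first")  // fits in the buffer
	trySend(buffered, "second") // buffer full: dropped with a log line
	fmt.Println(<-buffered)     // prints "first"
}
```

With the default `outCapacity` of 1, a subscriber that stops reading therefore loses events instead of blocking the client's events goroutine.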