Mirror of https://github.com/fluencelabs/tendermint (synced 2025-04-25 14:52:17 +00:00)
* use READ lock/unlock in ConsensusState#GetLastHeight

  Refs #2721

* do not use defers when there's no need

* fix peer formatting (output its address instead of the pointer)

```
[54310]: E[11-02|11:59:39.851] Connection failed @ sendRoutine module=p2p peer=0xb78f00 conn=MConn{74.207.236.148:26656} err="pong timeout"
```

  https://github.com/tendermint/tendermint/issues/2721#issuecomment-435326581

* panic if peer has no state

  https://github.com/tendermint/tendermint/issues/2721#issuecomment-435347165

  It's confusing that sometimes we check whether a peer has a state, but most of the time we expect it to be there:

  1. add79700b5/mempool/reactor.go (L138)
  2. add79700b5/rpc/core/consensus.go (L196)

  (edited) I will change everything to always assume the peer has a state and panic otherwise. That should help identify issues earlier.

* abci/localclient: extend lock on app callback

  The app callback should be protected by the lock as well (note this was already done for InitChainAsync, why not for the others?). Otherwise, while we execute the block, a tx might come in and call the callback at the same time we're updating it in execBlockOnProxyApp => DATA RACE

  Fixes #2721

  Consensus state is locked:

```
goroutine 113333 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00180009c, 0xc0000c7e00)
    /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*RWMutex).RLock(0xc001800090)
    /usr/local/go/src/sync/rwmutex.go:50 +0x4e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).GetRoundState(0xc001800000, 0x0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:218 +0x46
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).queryMaj23Routine(0xc0017def80, 0x11104a0, 0xc0072488f0, 0xc0072489c0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:735 +0x16d
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).AddPeer
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:172 +0x236
```

  because localClient is locked:

```
goroutine 1899 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0xc0000cb500)
    /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
    /usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).SetResponseCallback(0xc0001fb560, 0xc007868540)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:32 +0x33
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnConsensus).SetResponseCallback(0xc00002f750, 0xc007868540)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:57 +0x40
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.execBlockOnProxyApp(0x1104e20, 0xc002ca0ba0, 0x11092a0, 0xc00002f750, 0xc0001fe960, 0xc000bfc660, 0x110cfe0, 0xc000090330, 0xc9d12, 0xc000d9d5a0, ...)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:230 +0x1fd
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.(*BlockExecutor).ApplyBlock(0xc002c2a230, 0x7, 0x0, 0xc000eae880, 0x6, 0xc002e52c60, 0x16, 0x1f927, 0xc9d12, 0xc000d9d5a0, ...)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:96 +0x142
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit(0xc001800000, 0x1f928)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1339 +0xa3e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit(0xc001800000, 0x1f928)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1270 +0x451
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1(0xc001800000, 0x0, 0x1f928)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1218 +0x90
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit(0xc001800000, 0x1f928, 0x0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1247 +0x6b8
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xc003bc7ad0, 0xc003bc7b10)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1659 +0xbad
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xf1, 0xf1)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1517 +0x59
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg(0xc001800000, 0xd98200, 0xc0070dbed0, 0xc000cf4cc0, 0x28)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:660 +0x64b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine(0xc001800000, 0x0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:617 +0x670
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:311 +0x132
```

  and a tx comes in and CheckTx is executed right when we execute the block:

```
goroutine 111044 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0x0)
    /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
    /usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).CheckTxAsync(0xc0001fb0e0, 0xc002d94500, 0x13f, 0x280, 0x0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:85 +0x47
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnMempool).CheckTxAsync(0xc00002f720, 0xc002d94500, 0x13f, 0x280, 0x1)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:114 +0x51
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool.(*Mempool).CheckTx(0xc002d3a320, 0xc002d94500, 0x13f, 0x280, 0xc0072355f0, 0x0, 0x0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool/mempool.go:316 +0x17b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core.BroadcastTxSync(0xc002d94500, 0x13f, 0x280, 0x0, 0x0, 0x0)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go:93 +0xb8
reflect.Value.call(0xd85560, 0x10326c0, 0x13, 0xec7b8b, 0x4, 0xc00663f180, 0x1, 0x1, 0xc00663f180, 0xc00663f188, ...)
    /usr/local/go/src/reflect/value.go:447 +0x449
reflect.Value.Call(0xd85560, 0x10326c0, 0x13, 0xc00663f180, 0x1, 0x1, 0x0, 0x0, 0xc005cc9344)
    /usr/local/go/src/reflect/value.go:308 +0xa4
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.makeHTTPHandler.func2(0x1102060, 0xc00663f100, 0xc0082d7900)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:269 +0x188
net/http.HandlerFunc.ServeHTTP(0xc002c81f20, 0x1102060, 0xc00663f100, 0xc0082d7900)
    /usr/local/go/src/net/http/server.go:1964 +0x44
net/http.(*ServeMux).ServeHTTP(0xc002c81b60, 0x1102060, 0xc00663f100, 0xc0082d7900)
    /usr/local/go/src/net/http/server.go:2361 +0x127
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.maxBytesHandler.ServeHTTP(0x10f8a40, 0xc002c81b60, 0xf4240, 0x1102060, 0xc00663f100, 0xc0082d7900)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:219 +0xcf
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1(0x1103220, 0xc00121e620, 0xc0082d7900)
    /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:192 +0x394
net/http.HandlerFunc.ServeHTTP(0xc002c06ea0, 0x1103220, 0xc00121e620, 0xc0082d7900)
    /usr/local/go/src/net/http/server.go:1964 +0x44
net/http.serverHandler.ServeHTTP(0xc001a1aa90, 0x1103220, 0xc00121e620, 0xc0082d7900)
    /usr/local/go/src/net/http/server.go:2741 +0xab
net/http.(*conn).serve(0xc00785a3c0, 0x11041a0, 0xc000f844c0)
    /usr/local/go/src/net/http/server.go:1847 +0x646
created by net/http.(*Server).Serve
    /usr/local/go/src/net/http/server.go:2851 +0x2f5
```

* consensus: use read lock in Receive#VoteMessage

* use defer to unlock mutex because application might panic

* use defer in every method of the localClient

* add a changelog entry

* drain channels before Unsubscribe(All)

  Read 55362ed766/libs/pubsub/pubsub.go (L13) for a detailed explanation of the issue. We'll need to fix it someday. Make sure to keep an eye on https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md

* retry instead of panic when peer has no state in reactors other than consensus

  In the /dump_consensus_state RPC endpoint, skip a peer with no state.

* rpc/core/mempool: simplify error messages

* rpc/core/mempool: use time.After instead of timer

  Also, do not log the DeliverTx result (to be consistent with other methods).

* unlock before calling the callback in reqRes#SetCallback
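The core of the "abci/localclient: extend lock on app callback" bullet is a locking pattern rather than new functionality. Below is a minimal, self-contained sketch of that pattern; the types and function bodies are illustrative stand-ins, not the real `local_client.go` code. One mutex, released via `defer`, is held across both the application call and the response callback, so a concurrent `SetResponseCallback` (as happens during block execution) cannot race with a `CheckTx` triggered by an incoming tx.

```go
package main

import (
	"fmt"
	"sync"
)

// Hypothetical stand-ins for the ABCI request/response types; the shapes are
// illustrative only.
type Request struct{ Tx []byte }
type Response struct{ Code uint32 }

// localClient sketch: the mutex guards BOTH the application call and the
// response callback, and is released with defer so a panicking application
// cannot leave the client permanently locked.
type localClient struct {
	mtx      sync.Mutex
	checkTx  func(tx []byte) Response        // stands in for the ABCI application
	callback func(req Request, res Response) // set via SetResponseCallback
}

func (cli *localClient) SetResponseCallback(cb func(Request, Response)) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.callback = cb
}

func (cli *localClient) CheckTxAsync(tx []byte) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock() // defer, not a bare Unlock at the end of the method

	res := cli.checkTx(tx)
	// The callback runs while the lock is still held, so a concurrent
	// SetResponseCallback (e.g. from block execution) cannot race with it.
	cli.callback(Request{Tx: tx}, res)
}

func main() {
	cli := &localClient{
		checkTx:  func(tx []byte) Response { return Response{Code: 0} },
		callback: func(req Request, res Response) { fmt.Println("callback code:", res.Code) },
	}
	cli.SetResponseCallback(func(req Request, res Response) { fmt.Println("new callback:", res.Code) })
	cli.CheckTxAsync([]byte("tx1"))
}
```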
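The last bullet, "unlock before calling the callback in reqRes#SetCallback", is about never invoking a user callback while the mutex is held. A rough sketch of that shape, again with hypothetical types rather than the actual ReqRes implementation:

```go
package main

import "sync"

// Hypothetical response type, for illustration only.
type Response struct{ Code uint32 }

// ReqRes sketch: if the response has already arrived when SetCallback is
// called, the mutex is released *before* the callback runs, so the callback
// is never executed under the lock.
type ReqRes struct {
	mtx  sync.Mutex
	done bool
	res  Response
	cb   func(Response)
}

func (r *ReqRes) SetCallback(cb func(Response)) {
	r.mtx.Lock()

	if r.done {
		r.mtx.Unlock() // release the lock first ...
		cb(r.res)      // ... then run the callback outside of it
		return
	}

	r.cb = cb
	r.mtx.Unlock()
}

func main() {
	rr := &ReqRes{done: true, res: Response{Code: 0}}
	rr.SetCallback(func(res Response) { println("code:", res.Code) })
}
```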
407 lines · 10 KiB · Go
package abcicli

import (
	"bufio"
	"container/list"
	"errors"
	"fmt"
	"net"
	"reflect"
	"sync"
	"time"

	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
)

const reqQueueSize = 256 // TODO make configurable
// const maxResponseSize = 1048576 // 1MB TODO make configurable
const flushThrottleMS = 20 // Don't wait longer than...

var _ Client = (*socketClient)(nil)

// This is goroutine-safe, but users should beware that
// the application in general is not meant to be interfaced
// with concurrent callers.
type socketClient struct {
	cmn.BaseService

	reqQueue    chan *ReqRes
	flushTimer  *cmn.ThrottleTimer
	mustConnect bool

	mtx     sync.Mutex
	addr    string
	conn    net.Conn
	err     error
	reqSent *list.List
	resCb   func(*types.Request, *types.Response) // listens to all callbacks

}

func NewSocketClient(addr string, mustConnect bool) *socketClient {
	cli := &socketClient{
		reqQueue:    make(chan *ReqRes, reqQueueSize),
		flushTimer:  cmn.NewThrottleTimer("socketClient", flushThrottleMS),
		mustConnect: mustConnect,

		addr:    addr,
		reqSent: list.New(),
		resCb:   nil,
	}
	cli.BaseService = *cmn.NewBaseService(nil, "socketClient", cli)
	return cli
}

func (cli *socketClient) OnStart() error {
	if err := cli.BaseService.OnStart(); err != nil {
		return err
	}

	var err error
	var conn net.Conn
RETRY_LOOP:
	for {
		conn, err = cmn.Connect(cli.addr)
		if err != nil {
			if cli.mustConnect {
				return err
			}
			cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr))
			time.Sleep(time.Second * dialRetryIntervalSeconds)
			continue RETRY_LOOP
		}
		cli.conn = conn

		go cli.sendRequestsRoutine(conn)
		go cli.recvResponseRoutine(conn)

		return nil
	}
}

func (cli *socketClient) OnStop() {
	cli.BaseService.OnStop()

	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	if cli.conn != nil {
		cli.conn.Close()
	}

	cli.flushQueue()
}

// Stop the client and set the error
func (cli *socketClient) StopForError(err error) {
	if !cli.IsRunning() {
		return
	}

	cli.mtx.Lock()
	if cli.err == nil {
		cli.err = err
	}
	cli.mtx.Unlock()

	cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
	cli.Stop()
}

func (cli *socketClient) Error() error {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	return cli.err
}

// Set listener for all responses
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
	cli.mtx.Lock()
	cli.resCb = resCb
	cli.mtx.Unlock()
}

//----------------------------------------

func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {

	w := bufio.NewWriter(conn)
	for {
		select {
		case <-cli.flushTimer.Ch:
			select {
			case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
			default:
				// Probably will fill the buffer, or retry later.
			}
		case <-cli.Quit():
			return
		case reqres := <-cli.reqQueue:
			cli.willSendReq(reqres)
			err := types.WriteMessage(reqres.Request, w)
			if err != nil {
				cli.StopForError(fmt.Errorf("Error writing msg: %v", err))
				return
			}
			// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
			if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
				err = w.Flush()
				if err != nil {
					cli.StopForError(fmt.Errorf("Error flushing writer: %v", err))
					return
				}
			}
		}
	}
}

func (cli *socketClient) recvResponseRoutine(conn net.Conn) {

	r := bufio.NewReader(conn) // Buffer reads
	for {
		var res = &types.Response{}
		err := types.ReadMessage(r, res)
		if err != nil {
			cli.StopForError(err)
			return
		}
		switch r := res.Value.(type) {
		case *types.Response_Exception:
			// XXX After setting cli.err, release waiters (e.g. reqres.Done())
			cli.StopForError(errors.New(r.Exception.Error))
			return
		default:
			// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
			err := cli.didRecvResponse(res)
			if err != nil {
				cli.StopForError(err)
				return
			}
		}
	}
}

func (cli *socketClient) willSendReq(reqres *ReqRes) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.reqSent.PushBack(reqres)
}

func (cli *socketClient) didRecvResponse(res *types.Response) error {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()

	// Get the first ReqRes
	next := cli.reqSent.Front()
	if next == nil {
		return fmt.Errorf("Unexpected result type %v when nothing expected", reflect.TypeOf(res.Value))
	}
	reqres := next.Value.(*ReqRes)
	if !resMatchesReq(reqres.Request, res) {
		return fmt.Errorf("Unexpected result type %v when response to %v expected",
			reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
	}

	reqres.Response = res    // Set response
	reqres.Done()            // Release waiters
	cli.reqSent.Remove(next) // Pop first item from linked list

	// Notify reqRes listener if set
	if cb := reqres.GetCallback(); cb != nil {
		cb(res)
	}

	// Notify client listener if set
	if cli.resCb != nil {
		cli.resCb(reqres.Request, res)
	}

	return nil
}

//----------------------------------------

func (cli *socketClient) EchoAsync(msg string) *ReqRes {
	return cli.queueRequest(types.ToRequestEcho(msg))
}

func (cli *socketClient) FlushAsync() *ReqRes {
	return cli.queueRequest(types.ToRequestFlush())
}

func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
	return cli.queueRequest(types.ToRequestInfo(req))
}

func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
	return cli.queueRequest(types.ToRequestSetOption(req))
}

func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes {
	return cli.queueRequest(types.ToRequestDeliverTx(tx))
}

func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes {
	return cli.queueRequest(types.ToRequestCheckTx(tx))
}

func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
	return cli.queueRequest(types.ToRequestQuery(req))
}

func (cli *socketClient) CommitAsync() *ReqRes {
	return cli.queueRequest(types.ToRequestCommit())
}

func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
	return cli.queueRequest(types.ToRequestInitChain(req))
}

func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
	return cli.queueRequest(types.ToRequestBeginBlock(req))
}

func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
	return cli.queueRequest(types.ToRequestEndBlock(req))
}

//----------------------------------------

func (cli *socketClient) FlushSync() error {
	reqRes := cli.queueRequest(types.ToRequestFlush())
	if err := cli.Error(); err != nil {
		return err
	}
	reqRes.Wait() // NOTE: if we don't flush the queue, its possible to get stuck here
	return cli.Error()
}

func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
	reqres := cli.queueRequest(types.ToRequestEcho(msg))
	cli.FlushSync()
	return reqres.Response.GetEcho(), cli.Error()
}

func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
	reqres := cli.queueRequest(types.ToRequestInfo(req))
	cli.FlushSync()
	return reqres.Response.GetInfo(), cli.Error()
}

func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
	reqres := cli.queueRequest(types.ToRequestSetOption(req))
	cli.FlushSync()
	return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
	reqres := cli.queueRequest(types.ToRequestDeliverTx(tx))
	cli.FlushSync()
	return reqres.Response.GetDeliverTx(), cli.Error()
}

func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
	reqres := cli.queueRequest(types.ToRequestCheckTx(tx))
	cli.FlushSync()
	return reqres.Response.GetCheckTx(), cli.Error()
}

func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
	reqres := cli.queueRequest(types.ToRequestQuery(req))
	cli.FlushSync()
	return reqres.Response.GetQuery(), cli.Error()
}

func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
	reqres := cli.queueRequest(types.ToRequestCommit())
	cli.FlushSync()
	return reqres.Response.GetCommit(), cli.Error()
}

func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
	reqres := cli.queueRequest(types.ToRequestInitChain(req))
	cli.FlushSync()
	return reqres.Response.GetInitChain(), cli.Error()
}

func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
	reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
	cli.FlushSync()
	return reqres.Response.GetBeginBlock(), cli.Error()
}

func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
	reqres := cli.queueRequest(types.ToRequestEndBlock(req))
	cli.FlushSync()
	return reqres.Response.GetEndBlock(), cli.Error()
}

//----------------------------------------

func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
	reqres := NewReqRes(req)

	// TODO: set cli.err if reqQueue times out
	cli.reqQueue <- reqres

	// Maybe auto-flush, or unset auto-flush
	switch req.Value.(type) {
	case *types.Request_Flush:
		cli.flushTimer.Unset()
	default:
		cli.flushTimer.Set()
	}

	return reqres
}

func (cli *socketClient) flushQueue() {
	// mark all in-flight messages as resolved (they will get cli.Error())
	for req := cli.reqSent.Front(); req != nil; req = req.Next() {
		reqres := req.Value.(*ReqRes)
		reqres.Done()
	}

	// mark all queued messages as resolved
LOOP:
	for {
		select {
		case reqres := <-cli.reqQueue:
			reqres.Done()
		default:
			break LOOP
		}
	}
}

//----------------------------------------

func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
	switch req.Value.(type) {
	case *types.Request_Echo:
		_, ok = res.Value.(*types.Response_Echo)
	case *types.Request_Flush:
		_, ok = res.Value.(*types.Response_Flush)
	case *types.Request_Info:
		_, ok = res.Value.(*types.Response_Info)
	case *types.Request_SetOption:
		_, ok = res.Value.(*types.Response_SetOption)
	case *types.Request_DeliverTx:
		_, ok = res.Value.(*types.Response_DeliverTx)
	case *types.Request_CheckTx:
		_, ok = res.Value.(*types.Response_CheckTx)
	case *types.Request_Commit:
		_, ok = res.Value.(*types.Response_Commit)
	case *types.Request_Query:
		_, ok = res.Value.(*types.Response_Query)
	case *types.Request_InitChain:
		_, ok = res.Value.(*types.Response_InitChain)
	case *types.Request_BeginBlock:
		_, ok = res.Value.(*types.Response_BeginBlock)
	case *types.Request_EndBlock:
		_, ok = res.Value.(*types.Response_EndBlock)
	}
	return ok
}
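For context, a minimal usage sketch of the client defined in this file (a separate program, not part of the file above). It assumes an ABCI application is already listening on the example address, and that Start/Stop come from the embedded cmn.BaseService:

```go
package main

import (
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func main() {
	// Dial an ABCI application over a TCP socket; mustConnect=true makes
	// Start() fail instead of retrying if the app is not reachable.
	cli := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}
	defer cli.Stop()

	// EchoSync queues the request, flushes the connection, and returns the response.
	res, err := cli.EchoSync("hello")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Message)
}
```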