From f25d727035f4615cbb4b43452191fd83218fc1e1 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 7 Mar 2019 09:10:34 +0400 Subject: [PATCH 01/41] make dupl linter pass (#3385) Refs #3262 --- .golangci.yml | 1 - Makefile | 4 +- libs/db/db_test.go | 149 ++++++++++++++++++--------------------- rpc/client/event_test.go | 58 ++++++--------- 4 files changed, 91 insertions(+), 121 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 45cabe20..cf8bf165 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,7 +9,6 @@ linters: - maligned - errcheck - staticcheck - - dupl - ineffassign - interfacer - unconvert diff --git a/Makefile b/Makefile index 08373644..79ae6aab 100644 --- a/Makefile +++ b/Makefile @@ -214,11 +214,11 @@ vagrant_test: ### go tests test: @echo "--> Running go test" - go test -p 1 $(PACKAGES) + @go test -p 1 $(PACKAGES) test_race: @echo "--> Running go test --race" - go test -p 1 -v -race $(PACKAGES) + @go test -p 1 -v -race $(PACKAGES) # uses https://github.com/sasha-s/go-deadlock/ to detect potential deadlocks test_with_deadlock: diff --git a/libs/db/db_test.go b/libs/db/db_test.go index ffa7bb6a..7cb721b2 100644 --- a/libs/db/db_test.go +++ b/libs/db/db_test.go @@ -121,86 +121,75 @@ func TestDBIteratorNonemptyBeginAfter(t *testing.T) { } } -func TestDBBatchWrite1(t *testing.T) { - mdb := newMockDB() - ddb := NewDebugDB(t.Name(), mdb) - batch := ddb.NewBatch() +func TestDBBatchWrite(t *testing.T) { + testCases := []struct { + modify func(batch Batch) + calls map[string]int + }{ + 0: { + func(batch Batch) { + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.Write() + }, + map[string]int{ + "Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0, + "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0, + }, + }, + 1: { + func(batch Batch) { + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + 
batch.Write() + }, + map[string]int{ + "Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0, + "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0, + }, + }, + 2: { + func(batch Batch) { + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Delete(bz("3")) + batch.Set(bz("4"), bz("4")) + batch.WriteSync() + }, + map[string]int{ + "Set": 0, "SetSync": 0, "SetNoLock": 2, "SetNoLockSync": 1, + "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0, + }, + }, + 3: { + func(batch Batch) { + batch.Set(bz("1"), bz("1")) + batch.Set(bz("2"), bz("2")) + batch.Set(bz("4"), bz("4")) + batch.Delete(bz("3")) + batch.WriteSync() + }, + map[string]int{ + "Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0, + "Delete": 0, "DeleteSync": 0, "DeleteNoLock": 0, "DeleteNoLockSync": 1, + }, + }, + } - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Delete(bz("3")) - batch.Set(bz("4"), bz("4")) - batch.Write() + for i, tc := range testCases { + mdb := newMockDB() + ddb := NewDebugDB(t.Name(), mdb) + batch := ddb.NewBatch() - assert.Equal(t, 0, mdb.calls["Set"]) - assert.Equal(t, 0, mdb.calls["SetSync"]) - assert.Equal(t, 3, mdb.calls["SetNoLock"]) - assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) - assert.Equal(t, 0, mdb.calls["Delete"]) - assert.Equal(t, 0, mdb.calls["DeleteSync"]) - assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) - assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) -} - -func TestDBBatchWrite2(t *testing.T) { - mdb := newMockDB() - ddb := NewDebugDB(t.Name(), mdb) - batch := ddb.NewBatch() - - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Set(bz("4"), bz("4")) - batch.Delete(bz("3")) - batch.Write() - - assert.Equal(t, 0, mdb.calls["Set"]) - assert.Equal(t, 0, mdb.calls["SetSync"]) - assert.Equal(t, 3, mdb.calls["SetNoLock"]) - assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) - assert.Equal(t, 0, mdb.calls["Delete"]) - assert.Equal(t, 0, mdb.calls["DeleteSync"]) - 
assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) - assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) -} - -func TestDBBatchWriteSync1(t *testing.T) { - mdb := newMockDB() - ddb := NewDebugDB(t.Name(), mdb) - batch := ddb.NewBatch() - - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Delete(bz("3")) - batch.Set(bz("4"), bz("4")) - batch.WriteSync() - - assert.Equal(t, 0, mdb.calls["Set"]) - assert.Equal(t, 0, mdb.calls["SetSync"]) - assert.Equal(t, 2, mdb.calls["SetNoLock"]) - assert.Equal(t, 1, mdb.calls["SetNoLockSync"]) - assert.Equal(t, 0, mdb.calls["Delete"]) - assert.Equal(t, 0, mdb.calls["DeleteSync"]) - assert.Equal(t, 1, mdb.calls["DeleteNoLock"]) - assert.Equal(t, 0, mdb.calls["DeleteNoLockSync"]) -} - -func TestDBBatchWriteSync2(t *testing.T) { - mdb := newMockDB() - ddb := NewDebugDB(t.Name(), mdb) - batch := ddb.NewBatch() - - batch.Set(bz("1"), bz("1")) - batch.Set(bz("2"), bz("2")) - batch.Set(bz("4"), bz("4")) - batch.Delete(bz("3")) - batch.WriteSync() - - assert.Equal(t, 0, mdb.calls["Set"]) - assert.Equal(t, 0, mdb.calls["SetSync"]) - assert.Equal(t, 3, mdb.calls["SetNoLock"]) - assert.Equal(t, 0, mdb.calls["SetNoLockSync"]) - assert.Equal(t, 0, mdb.calls["Delete"]) - assert.Equal(t, 0, mdb.calls["DeleteSync"]) - assert.Equal(t, 0, mdb.calls["DeleteNoLock"]) - assert.Equal(t, 1, mdb.calls["DeleteNoLockSync"]) + tc.modify(batch) + + for call, exp := range tc.calls { + got := mdb.calls[call] + assert.Equal(t, exp, got, "#%v - key: %s", i, call) + } + } } diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index da4625d5..7b00d6ea 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -1,6 +1,7 @@ package client_test import ( + "fmt" "reflect" "testing" "time" @@ -10,6 +11,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" 
"github.com/tendermint/tendermint/types" ) @@ -78,7 +80,10 @@ func TestBlockEvents(t *testing.T) { } } -func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { +func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "async") } +func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } + +func testTxEventsSent(t *testing.T, broadcastMethod string) { for i, c := range GetClients() { i, c := i, c // capture params t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { @@ -95,45 +100,22 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { _, _, tx := MakeTxKV() evtTyp := types.EventTx - // send async - txres, err := c.BroadcastTxAsync(tx) - require.Nil(t, err, "%+v", err) - require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME - - // and wait for confirmation - evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) - // and make sure it has the proper info - txe, ok := evt.(types.EventDataTx) - require.True(t, ok, "%d: %#v", i, evt) - // make sure this is the proper tx - require.EqualValues(t, tx, txe.Tx) - require.True(t, txe.Result.IsOK()) - }) - } -} - -func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { - for i, c := range GetClients() { - i, c := i, c // capture params - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. 
- err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) - defer c.Stop() + // send + var ( + txres *ctypes.ResultBroadcastTx + err error + ) + switch broadcastMethod { + case "async": + txres, err = c.BroadcastTxAsync(tx) + case "sync": + txres, err = c.BroadcastTxSync(tx) + default: + panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) } - // make the tx - _, _, tx := MakeTxKV() - evtTyp := types.EventTx - - // send sync - txres, err := c.BroadcastTxSync(tx) - require.Nil(t, err, "%+v", err) - require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME + require.NoError(t, err) + require.Equal(t, txres.Code, abci.CodeTypeOK) // and wait for confirmation evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) From 91b488f9a541f5dd69f0bb2db4249356733a2ed3 Mon Sep 17 00:00:00 2001 From: YOSHIDA Masanori Date: Thu, 7 Mar 2019 22:02:13 +0900 Subject: [PATCH 02/41] docs: fix the reverse of meaning in spec (#3387) https://tools.ietf.org/html/rfc6962#section-2.1 "The largest power of two less than the number of items" is actually correct! For n > 1, let k be the largest power of two smaller than n (i.e., k < n <= 2k). --- docs/spec/blockchain/encoding.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 1b999335..e8258e4a 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -175,7 +175,7 @@ The differences between RFC 6962 and the simplest form a merkle tree are that: The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`. 2) When the number of items isn't a power of two, the left half of the tree is as big as it could be. - (The smallest power of two less than the number of items) This allows new leaves to be added with less + (The largest power of two less than the number of items) This allows new leaves to be added with less recomputation. 
For example: ``` From 3ebfa99f2c2403c17348e41190406892abf21d2a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 7 Mar 2019 19:35:04 +0400 Subject: [PATCH 03/41] do not pin repos without releases to exact revisions (#3382) We're pinning repos without releases because it's very easy to upgrade all the dependencies by executing dep ensure --upgrade. Instead, we should just never run this command directly, only dep ensure --upgrade . And we can defend that in PRs. Refs #3374 The problem with pinning to exact revisions: people who import Tendermint as a library (e.g. abci/types) are stuck with these revisions even though the code they import may not even use them. --- Gopkg.toml | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) diff --git a/Gopkg.toml b/Gopkg.toml index c334ab71..db97cb09 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -76,34 +76,14 @@ version = "^0.9.1" ################################### -## Some repos dont have releases. -## Pin to revision +## Repos which don't have releases. 
-[[constraint]] - name = "github.com/btcsuite/btcd" - revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d" - -[[constraint]] - name = "golang.org/x/crypto" - revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447" - -[[override]] - name = "github.com/jmhodges/levigo" - revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" - -# last revision used by go-crypto -[[constraint]] - name = "github.com/btcsuite/btcutil" - revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" - - -[[constraint]] - name = "github.com/rcrowley/go-metrics" - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[constraint]] - name = "golang.org/x/net" - revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" +## - github.com/btcsuite/btcd +## - golang.org/x/crypto +## - github.com/jmhodges/levigo +## - github.com/btcsuite/btcutil +## - github.com/rcrowley/go-metrics +## - golang.org/x/net [prune] go-tests = true From 28e9e9e7145b357cbd7078f4c83dcf66c13d4f7d Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Thu, 7 Mar 2019 17:03:57 +0100 Subject: [PATCH 04/41] update levigo to 1.0.0 (#3389) Although the version we were pinning to is from Nov. 
2016 there were no substantial changes: jmhodges/levigo@2b8c778 added go-modules support (no code changes) jmhodges/levigo@853d788 added a badge to the readme closes #3381 --- Gopkg.lock | 5 +++-- Gopkg.toml | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 146c9420..b5d022ae 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -154,11 +154,12 @@ version = "v1.0" [[projects]] - digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214" + digest = "1:a74b5a8e34ee5843cd6e65f698f3e75614f812ff170c2243425d75bc091e9af2" name = "github.com/jmhodges/levigo" packages = ["."] pruneopts = "UT" - revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" + revision = "853d788c5c416eaaee5b044570784a96c7a26975" + version = "v1.0.0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index db97cb09..505f0da4 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -75,12 +75,15 @@ name = "github.com/prometheus/client_golang" version = "^0.9.1" +[[constraint]] + name = "github.com/jmhodges/levigo" + version = "^1.0.0" + ################################### ## Repos which don't have releases. ## - github.com/btcsuite/btcd ## - golang.org/x/crypto -## - github.com/jmhodges/levigo ## - github.com/btcsuite/btcutil ## - github.com/rcrowley/go-metrics ## - golang.org/x/net From e415c326f93e5fa473d83a0ea6fa495ad0ea87f3 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Fri, 8 Mar 2019 06:40:59 +0100 Subject: [PATCH 05/41] update golang.org/x/crypto (#3392) Update Gopkg.lock via dep ensure --update golang.org/x/crypto see #3391 (comment) (nothing to review here really). 
--- Gopkg.lock | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index b5d022ae..530cd89d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -369,7 +369,8 @@ version = "v0.14.1" [[projects]] - digest = "1:00d2b3e64cdc3fa69aa250dfbe4cc38c4837d4f37e62279be2ae52107ffbbb44" + branch = "master" + digest = "1:f4edb30d5ff238e2abba10457010f74cd55ae20bbda8c54db1a07155fa020490" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -390,7 +391,7 @@ "salsa20/salsa", ] pruneopts = "UT" - revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447" + revision = "8dd112bcdc25174059e45e07517d9fc663123347" [[projects]] digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" From b6a510a3e7cefca56af4bdb009413e3950c6d59e Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 8 Mar 2019 09:46:09 +0400 Subject: [PATCH 06/41] make ineffassign linter pass (#3386) Refs #3262 This fixes two small bugs: 1) lite/dbprovider: return `ok` instead of true in parse* functions. It's weird that we're ignoring `ok` value before. 2) consensus/state: previously because of the shadowing we almost never output "Error with msg". Now we declare both `added` and `err` in the beginning of the function, so there's no shadowing. 
--- .golangci.yml | 1 - consensus/state.go | 12 ++++++++---- lite/dbprovider.go | 7 ++++--- lite/dynamic_verifier_test.go | 1 + lite/proxy/query_test.go | 2 ++ p2p/conn/connection_test.go | 8 +++++--- rpc/core/status.go | 2 +- state/execution_test.go | 1 + state/state_test.go | 6 ++++++ 9 files changed, 28 insertions(+), 12 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cf8bf165..a051e1a4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,7 +9,6 @@ linters: - maligned - errcheck - staticcheck - - ineffassign - interfacer - unconvert - goconst diff --git a/consensus/state.go b/consensus/state.go index cf32afe7..d4a12a0c 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -670,7 +670,10 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { cs.mtx.Lock() defer cs.mtx.Unlock() - var err error + var ( + err error + added bool + ) msg, peerID := mi.Msg, mi.PeerID switch msg := msg.(type) { case *ProposalMessage: @@ -679,7 +682,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { err = cs.setProposal(msg.Proposal) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - added, err := cs.addProposalBlockPart(msg, peerID) + added, err = cs.addProposalBlockPart(msg, peerID) if added { cs.statsMsgQueue <- mi } @@ -691,7 +694,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { case *VoteMessage: // attempt to add the vote and dupeout the validator if its a duplicate signature // if the vote gives us a 2/3-any or 2/3-one, we transition - added, err := cs.tryAddVote(msg.Vote, peerID) + added, err = cs.tryAddVote(msg.Vote, peerID) if added { cs.statsMsgQueue <- mi } @@ -714,7 +717,8 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg)) } if err != nil { - cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg) + cs.Logger.Error("Error with msg", "height", cs.Height, 
"round", cs.Round, + "peer", peerID, "err", err, "msg", msg) } } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index ef1b2a59..5582a963 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -258,14 +258,15 @@ func parseKey(key []byte) (chainID string, height int64, part string, ok bool) { } func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { - chainID, height, part, ok := parseKey(key) + var part string + chainID, height, part, ok = parseKey(key) if part != "sh" { return "", 0, false } - return chainID, height, true + return } func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { chainID, height, _, ok = parseKey(key) - return chainID, height, true + return } diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 386de513..e85cb7de 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -255,6 +255,7 @@ func TestConcurrencyInquirerVerify(t *testing.T) { cert.SetLogger(log.TestingLogger()) err = source.SaveFullCommit(fcz[7]) + require.Nil(err, "%+v", err) err = source.SaveFullCommit(fcz[8]) require.Nil(err, "%+v", err) sh := fcz[8].SignedHeader diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index c1450a5e..db2b6e46 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -93,6 +93,8 @@ func _TestAppProofs(t *testing.T) { // verify a query before the tx block has no data (and valid non-exist proof) bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert) require.NoError(err, "%#v", err) + require.NotNil(proof) + require.Equal(height, brh-1) // require.NotNil(proof) // TODO: Ensure that *some* keys will be there, ensuring that proof is nil, // (currently there's a race condition) diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index afad69d1..283b00eb 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -223,7 +223,10 @@ func 
TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { serverGotPing := make(chan struct{}) go func() { // read ping (one byte) - var packet, err = Packet(nil), error(nil) + var ( + packet Packet + err error + ) _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &packet, maxPingPongPacketSize) require.Nil(t, err) serverGotPing <- struct{}{} @@ -492,8 +495,7 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { defer mconnServer.Stop() // send msg with unknown msg type - err := error(nil) - err = amino.EncodeUvarint(mconnClient.conn, 4) + err := amino.EncodeUvarint(mconnClient.conn, 4) assert.Nil(t, err) _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF}) assert.Nil(t, err) diff --git a/rpc/core/status.go b/rpc/core/status.go index 224857d0..ae22ecd3 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -71,7 +71,7 @@ import ( // } // ``` func Status() (*ctypes.ResultStatus, error) { - var latestHeight int64 = -1 + var latestHeight int64 if consensusReactor.FastSync() { latestHeight = blockStore.Height() } else { diff --git a/state/execution_test.go b/state/execution_test.go index 94336851..a9fdfe27 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -43,6 +43,7 @@ func TestApplyBlock(t *testing.T) { block := makeBlock(state, 1) blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + //nolint:ineffassign state, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) diff --git a/state/state_test.go b/state/state_test.go index 4566d93e..ff8eed02 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -668,6 +668,7 @@ func TestLargeGenesisValidator(t *testing.T) { blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) // no changes in voting power (ProposerPrio += VotingPower == Voting in 1st round; than shiftByAvg == 
0, // than -Total == -Voting) // -> no change in ProposerPrio (stays zero): @@ -692,6 +693,7 @@ func TestLargeGenesisValidator(t *testing.T) { block := makeBlock(oldState, oldState.LastBlockHeight+1) blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} updatedState, err := updateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) lastState := updatedState for i := 0; i < 200; i++ { @@ -706,6 +708,7 @@ func TestLargeGenesisValidator(t *testing.T) { blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} updatedStateInner, err := updateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) lastState = updatedStateInner } // set state to last state of above iteration @@ -735,6 +738,7 @@ func TestLargeGenesisValidator(t *testing.T) { block := makeBlock(oldState, oldState.LastBlockHeight+1) blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) } require.Equal(t, 10+2, len(state.NextValidators.Validators)) @@ -766,6 +770,7 @@ func TestLargeGenesisValidator(t *testing.T) { block = makeBlock(curState, curState.LastBlockHeight+1) blockID = types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} curState, err = updateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) { isProposerUnchanged = false } @@ -790,6 +795,7 @@ func TestLargeGenesisValidator(t *testing.T) { blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} updatedState, err = updateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) + require.NoError(t, err) if i > numVals { // expect proposers to cycle through after the first 
iteration (of numVals blocks): if proposers[i%numVals] == nil { proposers[i%numVals] = updatedState.NextValidators.Proposer From 90794260bcd23989e4bd8530d596e39d9509e7ca Mon Sep 17 00:00:00 2001 From: mircea-c Date: Sat, 9 Mar 2019 10:13:36 -0500 Subject: [PATCH 07/41] circleci: removed complexity from docs deployment job (#3396) --- .circleci/config.yml | 19 +++++++++++++++++-- CHANGELOG_PENDING.md | 2 ++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 025fc48e..9c51bc48 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,7 +10,7 @@ defaults: &defaults docs_update_config: &docs_update_config working_directory: ~/repo docker: - - image: tendermint/docs_deployment + - image: tendermintdev/jq_curl environment: AWS_REGION: us-east-1 @@ -239,7 +239,22 @@ jobs: - run: name: Trigger website build command: | - chamber exec tendermint -- start_website_build + curl --silent \ + --show-error \ + -X POST \ + --header "Content-Type: application/json" \ + -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \ + "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json + + RESULT=`jq -r '.status' response.json` + MESSAGE=`jq -r '.message' response.json` + + if [[ ${RESULT} == "null" ]] || [[ ${RESULT} -ne "200" ]]; then + echo "CircleCI API call failed: $MESSAGE" + exit 1 + else + echo "Website build started" + fi workflows: version: 2 diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index cbb4077c..9ca5ab64 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -40,6 +40,8 @@ Special thanks to external contributors on this release: - leveldb.alivesnaps - leveldb.aliveiters +CI/CD: * [\#3396](https://github.com/tendermint/tendermint/pull/3396) + ### BUG FIXES: - [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection From b021f1e505482fb34f8a8e57cd86b171e4a57344 Mon Sep 
17 00:00:00 2001 From: Yumin Xia Date: Sun, 10 Mar 2019 00:46:32 -0800 Subject: [PATCH 08/41] libs/db: close batch (#3397) ClevelDB requires closing when WriteBatch is no longer needed, https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close Fixes the memory leak in https://github.com/cosmos/cosmos-sdk/issues/3842 --- libs/db/c_level_db.go | 5 +++++ libs/db/debug_db.go | 5 +++++ libs/db/go_level_db.go | 4 ++++ libs/db/mem_batch.go | 4 ++++ libs/db/prefix_db.go | 4 ++++ libs/db/remotedb/grpcdb/server.go | 1 + libs/db/remotedb/remotedb.go | 4 ++++ libs/db/types.go | 2 ++ lite/dbprovider.go | 1 + state/txindex/kv/kv.go | 2 ++ 10 files changed, 32 insertions(+) diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go index e411dfdd..116e51bc 100644 --- a/libs/db/c_level_db.go +++ b/libs/db/c_level_db.go @@ -187,6 +187,11 @@ func (mBatch *cLevelDBBatch) WriteSync() { } } +// Implements Batch. +func (mBatch *cLevelDBBatch) Close() { + mBatch.batch.Close() +} + //---------------------------------------- // Iterator // NOTE This is almost identical to db/go_level_db.Iterator diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go index bb361a26..658cd055 100644 --- a/libs/db/debug_db.go +++ b/libs/db/debug_db.go @@ -250,3 +250,8 @@ func (dbch debugBatch) WriteSync() { fmt.Printf("%v.batch.WriteSync()\n", dbch.label) dbch.bch.WriteSync() } + +// Implements Batch. +func (dbch debugBatch) Close() { + dbch.bch.Close() +} diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index fd487a4d..9a4358f6 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -184,6 +184,10 @@ func (mBatch *goLevelDBBatch) WriteSync() { } } +// Implements Batch. +// Close is no-op for goLevelDBBatch. 
+func (mBatch *goLevelDBBatch) Close() {} + //---------------------------------------- // Iterator // NOTE This is almost identical to db/c_level_db.Iterator diff --git a/libs/db/mem_batch.go b/libs/db/mem_batch.go index 5c5d0c13..ebba43f5 100644 --- a/libs/db/mem_batch.go +++ b/libs/db/mem_batch.go @@ -46,6 +46,10 @@ func (mBatch *memBatch) WriteSync() { mBatch.write(true) } +func (mBatch *memBatch) Close() { + mBatch.ops = nil +} + func (mBatch *memBatch) write(doSync bool) { if mtx := mBatch.db.Mutex(); mtx != nil { mtx.Lock() diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go index 40d72560..0dd06ef9 100644 --- a/libs/db/prefix_db.go +++ b/libs/db/prefix_db.go @@ -248,6 +248,10 @@ func (pb prefixBatch) WriteSync() { pb.source.WriteSync() } +func (pb prefixBatch) Close() { + pb.source.Close() +} + //---------------------------------------- // prefixIterator diff --git a/libs/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go index 3a9955dd..bfe65e61 100644 --- a/libs/db/remotedb/grpcdb/server.go +++ b/libs/db/remotedb/grpcdb/server.go @@ -180,6 +180,7 @@ func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.N func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { bat := s.db.NewBatch() + defer bat.Close() for _, op := range b.Ops { switch op.Type { case protodb.Operation_SET: diff --git a/libs/db/remotedb/remotedb.go b/libs/db/remotedb/remotedb.go index 2b60d815..c70d54b9 100644 --- a/libs/db/remotedb/remotedb.go +++ b/libs/db/remotedb/remotedb.go @@ -260,3 +260,7 @@ func (bat *batch) WriteSync() { panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err)) } } + +func (bat *batch) Close() { + bat.ops = nil +} diff --git a/libs/db/types.go b/libs/db/types.go index 9b9c6d0b..30f8afd1 100644 --- a/libs/db/types.go +++ b/libs/db/types.go @@ -57,10 +57,12 @@ type DB interface { //---------------------------------------- // Batch +// Batch Close must be called when the program no 
longer needs the object. type Batch interface { SetDeleter Write() WriteSync() + Close() } type SetDeleter interface { diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 5582a963..4e76e365 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -54,6 +54,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc) batch := dbp.db.NewBatch() + defer batch.Close() // Save the fc.validators. // We might be overwriting what we already have, but diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 93249b7f..84208b8c 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -78,6 +78,7 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { // AddBatch indexes a batch of transactions using the given list of tags. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() + defer storeBatch.Close() for _, result := range b.Ops { hash := result.Tx.Hash() @@ -109,6 +110,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { // Index indexes a single transaction using the given list of tags. 
func (txi *TxIndex) Index(result *types.TxResult) error { b := txi.store.NewBatch() + defer b.Close() hash := result.Tx.Hash() From 36d7180ca216f0d7ff62851fa441de2b0371d699 Mon Sep 17 00:00:00 2001 From: Yumin Xia Date: Sun, 10 Mar 2019 00:46:32 -0800 Subject: [PATCH 09/41] libs/db: close batch (#3397) ClevelDB requires closing when WriteBatch is no longer needed, https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close Fixes the memory leak in https://github.com/cosmos/cosmos-sdk/issues/3842 --- libs/db/c_level_db.go | 5 +++++ libs/db/debug_db.go | 5 +++++ libs/db/go_level_db.go | 4 ++++ libs/db/mem_batch.go | 4 ++++ libs/db/prefix_db.go | 4 ++++ libs/db/remotedb/grpcdb/server.go | 1 + libs/db/remotedb/remotedb.go | 4 ++++ libs/db/types.go | 2 ++ lite/dbprovider.go | 1 + state/txindex/kv/kv.go | 2 ++ 10 files changed, 32 insertions(+) diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go index 7f74b2a7..decb1af5 100644 --- a/libs/db/c_level_db.go +++ b/libs/db/c_level_db.go @@ -179,6 +179,11 @@ func (mBatch *cLevelDBBatch) WriteSync() { } } +// Implements Batch. +func (mBatch *cLevelDBBatch) Close() { + mBatch.batch.Close() +} + //---------------------------------------- // Iterator // NOTE This is almost identical to db/go_level_db.Iterator diff --git a/libs/db/debug_db.go b/libs/db/debug_db.go index bb361a26..658cd055 100644 --- a/libs/db/debug_db.go +++ b/libs/db/debug_db.go @@ -250,3 +250,8 @@ func (dbch debugBatch) WriteSync() { fmt.Printf("%v.batch.WriteSync()\n", dbch.label) dbch.bch.WriteSync() } + +// Implements Batch. +func (dbch debugBatch) Close() { + dbch.bch.Close() +} diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index fd487a4d..9a4358f6 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -184,6 +184,10 @@ func (mBatch *goLevelDBBatch) WriteSync() { } } +// Implements Batch. +// Close is no-op for goLevelDBBatch. 
+func (mBatch *goLevelDBBatch) Close() {} + //---------------------------------------- // Iterator // NOTE This is almost identical to db/c_level_db.Iterator diff --git a/libs/db/mem_batch.go b/libs/db/mem_batch.go index 5c5d0c13..ebba43f5 100644 --- a/libs/db/mem_batch.go +++ b/libs/db/mem_batch.go @@ -46,6 +46,10 @@ func (mBatch *memBatch) WriteSync() { mBatch.write(true) } +func (mBatch *memBatch) Close() { + mBatch.ops = nil +} + func (mBatch *memBatch) write(doSync bool) { if mtx := mBatch.db.Mutex(); mtx != nil { mtx.Lock() diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go index 40d72560..0dd06ef9 100644 --- a/libs/db/prefix_db.go +++ b/libs/db/prefix_db.go @@ -248,6 +248,10 @@ func (pb prefixBatch) WriteSync() { pb.source.WriteSync() } +func (pb prefixBatch) Close() { + pb.source.Close() +} + //---------------------------------------- // prefixIterator diff --git a/libs/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go index 3a9955dd..bfe65e61 100644 --- a/libs/db/remotedb/grpcdb/server.go +++ b/libs/db/remotedb/grpcdb/server.go @@ -180,6 +180,7 @@ func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.N func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) { bat := s.db.NewBatch() + defer bat.Close() for _, op := range b.Ops { switch op.Type { case protodb.Operation_SET: diff --git a/libs/db/remotedb/remotedb.go b/libs/db/remotedb/remotedb.go index 2b60d815..c70d54b9 100644 --- a/libs/db/remotedb/remotedb.go +++ b/libs/db/remotedb/remotedb.go @@ -260,3 +260,7 @@ func (bat *batch) WriteSync() { panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err)) } } + +func (bat *batch) Close() { + bat.ops = nil +} diff --git a/libs/db/types.go b/libs/db/types.go index 9b9c6d0b..30f8afd1 100644 --- a/libs/db/types.go +++ b/libs/db/types.go @@ -57,10 +57,12 @@ type DB interface { //---------------------------------------- // Batch +// Batch Close must be called when the program no 
longer needs the object. type Batch interface { SetDeleter Write() WriteSync() + Close() } type SetDeleter interface { diff --git a/lite/dbprovider.go b/lite/dbprovider.go index ef1b2a59..9a3636d5 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -54,6 +54,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc) batch := dbp.db.NewBatch() + defer batch.Close() // Save the fc.validators. // We might be overwriting what we already have, but diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 93249b7f..84208b8c 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -78,6 +78,7 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { // AddBatch indexes a batch of transactions using the given list of tags. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() + defer storeBatch.Close() for _, result := range b.Ops { hash := result.Tx.Hash() @@ -109,6 +110,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { // Index indexes a single transaction using the given list of tags. func (txi *TxIndex) Index(result *types.TxResult) error { b := txi.store.NewBatch() + defer b.Close() hash := result.Tx.Hash() From f996b10f479d7c9a6d81cca5a02c47b29a52b3f3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sun, 10 Mar 2019 13:06:34 +0400 Subject: [PATCH 10/41] update changelog and bump version to 0.30.2 --- CHANGELOG.md | 19 +++++++++++++++++++ version/version.go | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42e8761a..44ecdf38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## v0.30.2 + +*March 10th, 2019* + +This release fixes a CLevelDB memory leak. It was happening because we were not +closing the WriteBatch object after use. See [levigo's +godoc](https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close) for the +Close method. 
Special thanks goes to @Stumble who both reported an issue in +[cosmos-sdk](https://github.com/cosmos/cosmos-sdk/issues/3842) and provided a +fix here. + +### BREAKING CHANGES: + +* Go API +- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble) + +### BUG FIXES: +- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Fix CLevelDB memory leak (@Stumble) + ## v0.30.1 *February 20th, 2019* diff --git a/version/version.go b/version/version.go index 1f30978c..1b0a36ae 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.30.1" + TMCoreSemVer = "0.30.2" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.15.0" From 100ff08de93ff1907bf810f584ec5bdc7a2a5260 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 11 Mar 2019 15:31:53 +0400 Subject: [PATCH 11/41] p2p: do not panic when filter times out (#3384) Fixes #3369 --- CHANGELOG_PENDING.md | 1 + p2p/switch.go | 9 ++++++++- p2p/switch_test.go | 39 +++++++++++++++++++++++++++++++++++++++ p2p/transport.go | 2 +- 4 files changed, 49 insertions(+), 2 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9ca5ab64..5120e263 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -46,3 +46,4 @@ CI/CD: * [\#3396](https://github.com/tendermint/tendermint/pull/3396) - [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection - [libs/pubsub] \#951, \#1880 use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) +- [p2p] \#3369 do not panic when filter times out diff --git a/p2p/switch.go b/p2p/switch.go index ccd6d40f..a07f70ce 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ 
-497,7 +497,14 @@ func (sw *Switch) acceptRoutine() { ) continue - case *ErrTransportClosed: + case ErrFilterTimeout: + sw.Logger.Error( + "Peer filter timed out", + "err", err, + ) + + continue + case ErrTransportClosed: sw.Logger.Error( "Stopped accept routine, as transport is closed", "numPeers", sw.peers.Size(), diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 47cfed55..d5dd178b 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -2,6 +2,7 @@ package p2p import ( "bytes" + "errors" "fmt" "io" "io/ioutil" @@ -532,6 +533,44 @@ func TestSwitchAcceptRoutine(t *testing.T) { } } +type errorTransport struct { + acceptErr error +} + +func (et errorTransport) Accept(c peerConfig) (Peer, error) { + return nil, et.acceptErr +} +func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) { + panic("not implemented") +} +func (errorTransport) Cleanup(Peer) { + panic("not implemented") +} + +func TestSwitchAcceptRoutineErrorCases(t *testing.T) { + sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) + assert.NotPanics(t, func() { + err := sw.Start() + assert.NoError(t, err) + sw.Stop() + }) + + sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) + assert.NotPanics(t, func() { + err := sw.Start() + assert.NoError(t, err) + sw.Stop() + }) + // TODO(melekes) check we remove our address from addrBook + + sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) + assert.NotPanics(t, func() { + err := sw.Start() + assert.NoError(t, err) + sw.Stop() + }) +} + func BenchmarkSwitchBroadcast(b *testing.B) { s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { // Make bar reactors of bar channels each diff --git a/p2p/transport.go b/p2p/transport.go index d1bccf9b..d36065ab 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -175,7 +175,7 @@ func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { return mt.wrapPeer(a.conn, a.nodeInfo, cfg, nil), nil case <-mt.closec: - return 
nil, &ErrTransportClosed{} + return nil, ErrTransportClosed{} } } From dc359bd3a51c52803a6af820a36ee41796284e87 Mon Sep 17 00:00:00 2001 From: Anca Zamfir Date: Mon, 11 Mar 2019 16:17:25 +0200 Subject: [PATCH 12/41] types: remove check for priority order of existing validators (#3407) When scaling and averaging is invoked, it is possible to have validators with close priorities ending up with same priority. With the current code, this makes it impossible to verify the priority orders before and after updates. Fixes #3383 --- types/validator_set_test.go | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 22f373c7..9fc2d346 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -1113,15 +1113,6 @@ func applyChangesToValSet(t *testing.T, valSet *ValidatorSet, valsLists ...[]tes assert.NoError(t, err) } -func isAddressInList(address []byte, valsList []testVal) bool { - for _, val := range valsList { - if bytes.Equal([]byte(val.name), address) { - return true - } - } - return false -} - func TestValSetUpdatePriorityOrderTests(t *testing.T) { const nMaxElections = 5000 @@ -1206,25 +1197,6 @@ func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg tes assert.Equal(t, expectedPri, val.ProposerPriority) } } - - // check that the priority order for validators that remained is the same - // as in the original set - remainingValsPriSlice := updatedValsPriSorted[len(cfg.addedVals):] - - for len(remainingValsPriSlice) > 0 { - addressInChanged := remainingValsPriSlice[0].Address - addressInOld := origValsPriSorted[0].Address - - // skip validators in original list that have been removed - if isAddressInList(addressInOld, cfg.deletedVals) { - origValsPriSorted = origValsPriSorted[1:] - continue - } - assert.Equal(t, addressInOld, addressInChanged, "wrong priority order") - - remainingValsPriSlice = remainingValsPriSlice[1:] - origValsPriSorted = 
origValsPriSorted[1:] - } } //--------------------- From 15f621141dce76d992f6c8dcfbd4c522878b6108 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 11 Mar 2019 22:21:17 +0400 Subject: [PATCH 13/41] remove TimeIotaMs from ABCI consensus params (#3403) Also - init substructures to avoid panic in pb2tm.ConsensusParams Before: if csp.Block is nil and we later try to access/write to it, we'll panic. After: if csp.Block is nil and we later try to access/write to it, there'll be no panic. --- CHANGELOG_PENDING.md | 4 +- abci/types/types.pb.go | 410 ++++++++++++++++++----------------------- abci/types/types.proto | 2 - consensus/replay.go | 7 +- docs/spec/abci/abci.md | 2 - state/state_test.go | 7 +- types/params.go | 2 +- types/params_test.go | 9 +- types/protobuf.go | 16 +- types/protobuf_test.go | 2 + 10 files changed, 209 insertions(+), 252 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 5120e263..29400929 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -25,7 +25,7 @@ Special thanks to external contributors on this release: mempool's current `txs_total_bytes` is exposed via `total_bytes` field in `/num_unconfirmed_txs` and `/unconfirmed_txs` RPC endpoints. 
- [config] \#2920 Remove `consensus.blocktime_iota` parameter -- [genesis] \#2920 Add `time_iota_ms` to block's consensus parameters +- [genesis] \#2920 Add `time_iota_ms` to block's consensus parameters (not exposed to the application) - [genesis] \#2920 Rename `consensus_params.block_size` to `consensus_params.block` ### IMPROVEMENTS: @@ -40,8 +40,6 @@ Special thanks to external contributors on this release: - leveldb.alivesnaps - leveldb.aliveiters -CI/CD: * [\#3396](https://github.com/tendermint/tendermint/pull/3396) - ### BUG FIXES: - [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 79af610c..b09213a5 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{0} + return fileDescriptor_types_a177e47fab90f91d, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{1} + return fileDescriptor_types_a177e47fab90f91d, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{2} + return fileDescriptor_types_a177e47fab90f91d, []int{2} } func (m 
*RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -571,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{3} + return fileDescriptor_types_a177e47fab90f91d, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{4} + return fileDescriptor_types_a177e47fab90f91d, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -692,7 +692,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{5} + return fileDescriptor_types_a177e47fab90f91d, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -770,7 +770,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{6} + return fileDescriptor_types_a177e47fab90f91d, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -841,7 +841,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return 
proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{7} + return fileDescriptor_types_a177e47fab90f91d, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -909,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{8} + return fileDescriptor_types_a177e47fab90f91d, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -956,7 +956,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{9} + return fileDescriptor_types_a177e47fab90f91d, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1003,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{10} + return fileDescriptor_types_a177e47fab90f91d, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1049,7 +1049,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, 
[]int{11} + return fileDescriptor_types_a177e47fab90f91d, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1102,7 +1102,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{12} + return fileDescriptor_types_a177e47fab90f91d, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1555,7 +1555,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{13} + return fileDescriptor_types_a177e47fab90f91d, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1602,7 +1602,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{14} + return fileDescriptor_types_a177e47fab90f91d, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1648,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{15} + return fileDescriptor_types_a177e47fab90f91d, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1692,7 +1692,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } 
func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{16} + return fileDescriptor_types_a177e47fab90f91d, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1771,7 +1771,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{17} + return fileDescriptor_types_a177e47fab90f91d, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1833,7 +1833,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{18} + return fileDescriptor_types_a177e47fab90f91d, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1896,7 +1896,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{19} + return fileDescriptor_types_a177e47fab90f91d, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1999,7 +1999,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) 
Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{20} + return fileDescriptor_types_a177e47fab90f91d, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2053,7 +2053,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{21} + return fileDescriptor_types_a177e47fab90f91d, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2156,7 +2156,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{22} + return fileDescriptor_types_a177e47fab90f91d, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2254,7 +2254,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{23} + return fileDescriptor_types_a177e47fab90f91d, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2316,7 +2316,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{24} + return fileDescriptor_types_a177e47fab90f91d, []int{24} } 
func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2367,7 +2367,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{25} + return fileDescriptor_types_a177e47fab90f91d, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2422,9 +2422,7 @@ type BlockParams struct { // Note: must be greater than 0 MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` // Note: must be greater or equal to -1 - MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` - // Note: must be greater than 0 - TimeIotaMs int64 `protobuf:"varint,3,opt,name=time_iota_ms,json=timeIotaMs,proto3" json:"time_iota_ms,omitempty"` + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2434,7 +2432,7 @@ func (m *BlockParams) Reset() { *m = BlockParams{} } func (m *BlockParams) String() string { return proto.CompactTextString(m) } func (*BlockParams) ProtoMessage() {} func (*BlockParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{26} + return fileDescriptor_types_a177e47fab90f91d, []int{26} } func (m *BlockParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2477,13 +2475,6 @@ func (m *BlockParams) GetMaxGas() int64 { return 0 } -func (m *BlockParams) GetTimeIotaMs() int64 { - if m != nil { - return m.TimeIotaMs - } - return 0 -} - // EvidenceParams contains limits on the evidence. 
type EvidenceParams struct { // Note: must be greater than 0 @@ -2497,7 +2488,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{27} + return fileDescriptor_types_a177e47fab90f91d, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2545,7 +2536,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } func (*ValidatorParams) ProtoMessage() {} func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{28} + return fileDescriptor_types_a177e47fab90f91d, []int{28} } func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2593,7 +2584,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{29} + return fileDescriptor_types_a177e47fab90f91d, []int{29} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2667,7 +2658,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{30} + return fileDescriptor_types_a177e47fab90f91d, []int{30} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2820,7 +2811,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} 
func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{31} + return fileDescriptor_types_a177e47fab90f91d, []int{31} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2875,7 +2866,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{32} + return fileDescriptor_types_a177e47fab90f91d, []int{32} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2930,7 +2921,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{33} + return fileDescriptor_types_a177e47fab90f91d, []int{33} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2987,7 +2978,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{34} + return fileDescriptor_types_a177e47fab90f91d, []int{34} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3043,7 +3034,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{35} + return fileDescriptor_types_a177e47fab90f91d, []int{35} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3099,7 +3090,7 @@ func 
(m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{36} + return fileDescriptor_types_a177e47fab90f91d, []int{36} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3154,7 +3145,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{37} + return fileDescriptor_types_a177e47fab90f91d, []int{37} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3212,7 +3203,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_e441973ce6650a0d, []int{38} + return fileDescriptor_types_a177e47fab90f91d, []int{38} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4816,9 +4807,6 @@ func (this *BlockParams) Equal(that interface{}) bool { if this.MaxGas != that1.MaxGas { return false } - if this.TimeIotaMs != that1.TimeIotaMs { - return false - } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -7023,11 +7011,6 @@ func (m *BlockParams) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) } - if m.TimeIotaMs != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.TimeIotaMs)) - } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -8150,12 +8133,8 @@ func NewPopulatedBlockParams(r randyTypes, easy bool) *BlockParams { if r.Intn(2) == 0 { this.MaxGas *= -1 } - this.TimeIotaMs = int64(r.Int63()) - if r.Intn(2) == 0 { - 
this.TimeIotaMs *= -1 - } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -9335,9 +9314,6 @@ func (m *BlockParams) Size() (n int) { if m.MaxGas != 0 { n += 1 + sovTypes(uint64(m.MaxGas)) } - if m.TimeIotaMs != 0 { - n += 1 + sovTypes(uint64(m.TimeIotaMs)) - } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -13604,25 +13580,6 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { break } } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeIotaMs", wireType) - } - m.TimeIotaMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimeIotaMs |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15400,150 +15357,149 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_e441973ce6650a0d) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_e441973ce6650a0d) + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d) } -var fileDescriptor_types_e441973ce6650a0d = []byte{ - // 2223 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x73, 0x1c, 0x47, - 0x15, 0xd7, 0xec, 0xf7, 0xbc, 0xd5, 0x7e, 0xa4, 0x2d, 0xdb, 0xeb, 0x25, 0x48, 0xae, 0x31, 0x24, - 0x12, 0x51, 0x56, 0x89, 0x82, 0x29, 0x39, 0x0e, 0x54, 0x69, 0x6d, 0x83, 0x54, 0x49, 0x40, 0x8c, - 0x6d, 0x71, 0xa1, 0x6a, 0xaa, 0x77, 0xa7, 0xb5, 0x3b, 0xa5, 0xdd, 0x99, 0xc9, 0x4c, 0xaf, 0xb2, - 0xe2, 0xc8, 0x39, 0x87, 0x1c, 0xf8, 
0x13, 0x38, 0xf0, 0x27, 0xe4, 0xc8, 0x89, 0xca, 0x91, 0x03, - 0x67, 0x03, 0xa2, 0x38, 0xc0, 0x95, 0xa2, 0x8a, 0x23, 0xd5, 0xaf, 0x7b, 0x3e, 0x35, 0x6b, 0xe2, - 0xc0, 0x89, 0xcb, 0x6e, 0xf7, 0xfb, 0xe8, 0x8f, 0x37, 0xef, 0xbd, 0xdf, 0x7b, 0x0d, 0xb7, 0xe8, - 0x68, 0xec, 0xec, 0xf1, 0x4b, 0x9f, 0x85, 0xf2, 0x77, 0xe0, 0x07, 0x1e, 0xf7, 0x48, 0x15, 0x27, - 0xfd, 0xb7, 0x27, 0x0e, 0x9f, 0x2e, 0x46, 0x83, 0xb1, 0x37, 0xdf, 0x9b, 0x78, 0x13, 0x6f, 0x0f, - 0xb9, 0xa3, 0xc5, 0x19, 0xce, 0x70, 0x82, 0x23, 0xa9, 0xd5, 0x7f, 0x98, 0x12, 0xe7, 0xcc, 0xb5, - 0x59, 0x30, 0x77, 0x5c, 0x9e, 0x1e, 0x8e, 0x83, 0x4b, 0x9f, 0x7b, 0x7b, 0x73, 0x16, 0x9c, 0xcf, - 0x98, 0xfa, 0x53, 0xca, 0x07, 0xff, 0x51, 0x79, 0xe6, 0x8c, 0xc2, 0xbd, 0xb1, 0x37, 0x9f, 0x7b, - 0x6e, 0xfa, 0xb0, 0xfd, 0xad, 0x89, 0xe7, 0x4d, 0x66, 0x2c, 0x39, 0x1c, 0x77, 0xe6, 0x2c, 0xe4, - 0x74, 0xee, 0x4b, 0x01, 0xe3, 0x77, 0x15, 0xa8, 0x9b, 0xec, 0x93, 0x05, 0x0b, 0x39, 0xd9, 0x86, - 0x0a, 0x1b, 0x4f, 0xbd, 0x5e, 0xe9, 0xae, 0xb6, 0xdd, 0xdc, 0x27, 0x03, 0xb9, 0x90, 0xe2, 0x3e, - 0x19, 0x4f, 0xbd, 0xa3, 0x35, 0x13, 0x25, 0xc8, 0x5b, 0x50, 0x3d, 0x9b, 0x2d, 0xc2, 0x69, 0xaf, - 0x8c, 0xa2, 0x37, 0xb2, 0xa2, 0x3f, 0x14, 0xac, 0xa3, 0x35, 0x53, 0xca, 0x88, 0x65, 0x1d, 0xf7, - 0xcc, 0xeb, 0x55, 0x8a, 0x96, 0x3d, 0x76, 0xcf, 0x70, 0x59, 0x21, 0x41, 0x0e, 0x00, 0x42, 0xc6, - 0x2d, 0xcf, 0xe7, 0x8e, 0xe7, 0xf6, 0xaa, 0x28, 0x7f, 0x3b, 0x2b, 0xff, 0x94, 0xf1, 0x9f, 0x20, - 0xfb, 0x68, 0xcd, 0xd4, 0xc3, 0x68, 0x22, 0x34, 0x1d, 0xd7, 0xe1, 0xd6, 0x78, 0x4a, 0x1d, 0xb7, - 0x57, 0x2b, 0xd2, 0x3c, 0x76, 0x1d, 0xfe, 0x48, 0xb0, 0x85, 0xa6, 0x13, 0x4d, 0xc4, 0x55, 0x3e, - 0x59, 0xb0, 0xe0, 0xb2, 0x57, 0x2f, 0xba, 0xca, 0x4f, 0x05, 0x4b, 0x5c, 0x05, 0x65, 0xc8, 0x43, - 0x68, 0x8e, 0xd8, 0xc4, 0x71, 0xad, 0xd1, 0xcc, 0x1b, 0x9f, 0xf7, 0x1a, 0xa8, 0xd2, 0xcb, 0xaa, - 0x0c, 0x85, 0xc0, 0x50, 0xf0, 0x8f, 0xd6, 0x4c, 0x18, 0xc5, 0x33, 0xb2, 0x0f, 0x8d, 0xf1, 0x94, - 0x8d, 0xcf, 0x2d, 0xbe, 0xec, 0xe9, 0xa8, 0x79, 0x33, 0xab, 0xf9, 0x48, 
0x70, 0x9f, 0x2d, 0x8f, - 0xd6, 0xcc, 0xfa, 0x58, 0x0e, 0xc9, 0x7d, 0xd0, 0x99, 0x6b, 0xab, 0xed, 0x9a, 0xa8, 0x74, 0x2b, - 0xf7, 0x5d, 0x5c, 0x3b, 0xda, 0xac, 0xc1, 0xd4, 0x98, 0x0c, 0xa0, 0x26, 0x9c, 0xc1, 0xe1, 0xbd, - 0x75, 0xd4, 0xd9, 0xc8, 0x6d, 0x84, 0xbc, 0xa3, 0x35, 0x53, 0x49, 0x09, 0xf3, 0xd9, 0x6c, 0xe6, - 0x5c, 0xb0, 0x40, 0x1c, 0xee, 0x46, 0x91, 0xf9, 0x1e, 0x4b, 0x3e, 0x1e, 0x4f, 0xb7, 0xa3, 0xc9, - 0xb0, 0x0e, 0xd5, 0x0b, 0x3a, 0x5b, 0x30, 0xe3, 0x4d, 0x68, 0xa6, 0x3c, 0x85, 0xf4, 0xa0, 0x3e, - 0x67, 0x61, 0x48, 0x27, 0xac, 0xa7, 0xdd, 0xd5, 0xb6, 0x75, 0x33, 0x9a, 0x1a, 0x6d, 0x58, 0x4f, - 0xfb, 0x89, 0x31, 0x8f, 0x15, 0x85, 0x2f, 0x08, 0xc5, 0x0b, 0x16, 0x84, 0xc2, 0x01, 0x94, 0xa2, - 0x9a, 0x92, 0x7b, 0xd0, 0x42, 0x3b, 0x58, 0x11, 0x5f, 0xf8, 0x69, 0xc5, 0x5c, 0x47, 0xe2, 0xa9, - 0x12, 0xda, 0x82, 0xa6, 0xbf, 0xef, 0xc7, 0x22, 0x65, 0x14, 0x01, 0x7f, 0xdf, 0x57, 0x02, 0xc6, - 0xfb, 0xd0, 0xcd, 0xbb, 0x12, 0xe9, 0x42, 0xf9, 0x9c, 0x5d, 0xaa, 0xfd, 0xc4, 0x90, 0x6c, 0xa8, - 0x6b, 0xe1, 0x1e, 0xba, 0xa9, 0xee, 0xf8, 0x79, 0x29, 0x56, 0x8e, 0xbd, 0x89, 0x1c, 0x40, 0x45, - 0x04, 0x15, 0x6a, 0x37, 0xf7, 0xfb, 0x03, 0x19, 0x71, 0x83, 0x28, 0xe2, 0x06, 0xcf, 0xa2, 0x88, - 0x1b, 0x36, 0xbe, 0x7c, 0xb1, 0xb5, 0xf6, 0xf9, 0x1f, 0xb7, 0x34, 0x13, 0x35, 0xc8, 0x1d, 0xe1, - 0x10, 0xd4, 0x71, 0x2d, 0xc7, 0x56, 0xfb, 0xd4, 0x71, 0x7e, 0x6c, 0x93, 0x43, 0xe8, 0x8e, 0x3d, - 0x37, 0x64, 0x6e, 0xb8, 0x08, 0x2d, 0x9f, 0x06, 0x74, 0x1e, 0xaa, 0x58, 0x8b, 0x3e, 0xff, 0xa3, - 0x88, 0x7d, 0x82, 0x5c, 0xb3, 0x33, 0xce, 0x12, 0xc8, 0x07, 0x00, 0x17, 0x74, 0xe6, 0xd8, 0x94, - 0x7b, 0x41, 0xd8, 0xab, 0xdc, 0x2d, 0xa7, 0x94, 0x4f, 0x23, 0xc6, 0x73, 0xdf, 0xa6, 0x9c, 0x0d, - 0x2b, 0xe2, 0x64, 0x66, 0x4a, 0x9e, 0xbc, 0x01, 0x1d, 0xea, 0xfb, 0x56, 0xc8, 0x29, 0x67, 0xd6, - 0xe8, 0x92, 0xb3, 0x10, 0xe3, 0x71, 0xdd, 0x6c, 0x51, 0xdf, 0x7f, 0x2a, 0xa8, 0x43, 0x41, 0x34, - 0xec, 0xf8, 0x6b, 0x62, 0xa8, 0x10, 0x02, 0x15, 0x9b, 0x72, 0x8a, 0xd6, 0x58, 0x37, 0x71, 0x2c, - 0x68, 0x3e, 
0xe5, 0x53, 0x75, 0x47, 0x1c, 0x93, 0x5b, 0x50, 0x9b, 0x32, 0x67, 0x32, 0xe5, 0x78, - 0xad, 0xb2, 0xa9, 0x66, 0xc2, 0xf0, 0x7e, 0xe0, 0x5d, 0x30, 0xcc, 0x16, 0x0d, 0x53, 0x4e, 0x8c, - 0xbf, 0x6a, 0xf0, 0xda, 0xb5, 0xf0, 0x12, 0xeb, 0x4e, 0x69, 0x38, 0x8d, 0xf6, 0x12, 0x63, 0xf2, - 0x96, 0x58, 0x97, 0xda, 0x2c, 0x50, 0x59, 0xac, 0xa5, 0x6e, 0x7c, 0x84, 0x44, 0x75, 0x51, 0x25, - 0x42, 0x9e, 0x40, 0x77, 0x46, 0x43, 0x6e, 0xc9, 0x28, 0xb0, 0x30, 0x4b, 0x95, 0x33, 0x91, 0xf9, - 0x11, 0x8d, 0xa2, 0x45, 0x38, 0xa7, 0x52, 0x6f, 0xcf, 0x32, 0x54, 0x72, 0x04, 0x1b, 0xa3, 0xcb, - 0x5f, 0x50, 0x97, 0x3b, 0x2e, 0xb3, 0xae, 0xd9, 0xbc, 0xa3, 0x96, 0x7a, 0x72, 0xe1, 0xd8, 0xcc, - 0x1d, 0x47, 0xc6, 0xbe, 0x11, 0xab, 0xc4, 0x1f, 0x23, 0x34, 0xee, 0x42, 0x3b, 0x9b, 0x0b, 0x48, - 0x1b, 0x4a, 0x7c, 0xa9, 0x6e, 0x58, 0xe2, 0x4b, 0xc3, 0x88, 0x3d, 0x30, 0x0e, 0xc8, 0x6b, 0x32, - 0x3b, 0xd0, 0xc9, 0x25, 0x87, 0x94, 0xb9, 0xb5, 0xb4, 0xb9, 0x8d, 0x0e, 0xb4, 0x32, 0x39, 0xc1, - 0xf8, 0xac, 0x0a, 0x0d, 0x93, 0x85, 0xbe, 0x70, 0x26, 0x72, 0x00, 0x3a, 0x5b, 0x8e, 0x99, 0x4c, - 0xc7, 0x5a, 0x2e, 0xd9, 0x49, 0x99, 0x27, 0x11, 0x5f, 0xa4, 0x85, 0x58, 0x98, 0xec, 0x64, 0xa0, - 0xe4, 0x46, 0x5e, 0x29, 0x8d, 0x25, 0xbb, 0x59, 0x2c, 0xd9, 0xc8, 0xc9, 0xe6, 0xc0, 0x64, 0x27, - 0x03, 0x26, 0xf9, 0x85, 0x33, 0x68, 0xf2, 0xa0, 0x00, 0x4d, 0xf2, 0xc7, 0x5f, 0x01, 0x27, 0x0f, - 0x0a, 0xe0, 0xa4, 0x77, 0x6d, 0xaf, 0x42, 0x3c, 0xd9, 0xcd, 0xe2, 0x49, 0xfe, 0x3a, 0x39, 0x40, - 0xf9, 0xa0, 0x08, 0x50, 0xee, 0xe4, 0x74, 0x56, 0x22, 0xca, 0x7b, 0xd7, 0x10, 0xe5, 0x56, 0x4e, - 0xb5, 0x00, 0x52, 0x1e, 0x64, 0x72, 0x3d, 0x14, 0xde, 0xad, 0x38, 0xd9, 0x93, 0xef, 0x5d, 0x47, - 0xa3, 0xdb, 0xf9, 0x4f, 0x5b, 0x04, 0x47, 0x7b, 0x39, 0x38, 0xba, 0x99, 0x3f, 0x65, 0x0e, 0x8f, - 0x12, 0x54, 0xd9, 0x11, 0x71, 0x9f, 0xf3, 0x34, 0x91, 0x23, 0x58, 0x10, 0x78, 0x81, 0x4a, 0xd8, - 0x72, 0x62, 0x6c, 0x8b, 0x4c, 0x94, 0xf8, 0xd7, 0x4b, 0x10, 0x08, 0x9d, 0x3e, 0xe5, 0x5d, 0xc6, - 0x17, 0x5a, 0xa2, 0x8b, 0x11, 0x9d, 0xce, 0x62, 
0xba, 0xca, 0x62, 0x29, 0x60, 0x2a, 0x65, 0x81, - 0x69, 0x0b, 0x9a, 0x22, 0x57, 0xe6, 0x30, 0x87, 0xfa, 0x11, 0xe6, 0x90, 0xef, 0xc0, 0x6b, 0x98, - 0x67, 0x24, 0x7c, 0xa9, 0x40, 0xac, 0x60, 0x20, 0x76, 0x04, 0x43, 0x5a, 0x4c, 0x26, 0xc0, 0xb7, - 0xe1, 0x46, 0x4a, 0x56, 0xac, 0x8b, 0x39, 0x4e, 0x26, 0xdf, 0x6e, 0x2c, 0x7d, 0xe8, 0xfb, 0x47, - 0x34, 0x9c, 0x1a, 0x1f, 0x27, 0x06, 0x4a, 0xf0, 0x8c, 0x40, 0x65, 0xec, 0xd9, 0xf2, 0xde, 0x2d, - 0x13, 0xc7, 0x02, 0xe3, 0x66, 0xde, 0x04, 0x0f, 0xa7, 0x9b, 0x62, 0x28, 0xa4, 0xe2, 0x50, 0xd2, - 0x65, 0xcc, 0x18, 0xbf, 0xd2, 0x92, 0xf5, 0x12, 0x88, 0x2b, 0x42, 0x23, 0xed, 0xbf, 0x41, 0xa3, - 0xd2, 0xab, 0xa1, 0x91, 0x71, 0xa5, 0x25, 0x9f, 0x2c, 0xc6, 0x99, 0xaf, 0x77, 0x45, 0xe1, 0x3d, - 0x8e, 0x6b, 0xb3, 0x25, 0x9a, 0xb4, 0x6c, 0xca, 0x49, 0x54, 0x02, 0xd4, 0xd0, 0xcc, 0xd9, 0x12, - 0xa0, 0x8e, 0x34, 0x39, 0x21, 0xf7, 0x10, 0x9f, 0xbc, 0x33, 0x15, 0xaa, 0xad, 0x81, 0x2a, 0xd4, - 0x4f, 0x04, 0xd1, 0x94, 0xbc, 0x54, 0xb6, 0xd5, 0x33, 0xe0, 0xf6, 0x3a, 0xe8, 0xe2, 0xa0, 0xa1, - 0x4f, 0xc7, 0x0c, 0x23, 0x4f, 0x37, 0x13, 0x82, 0x71, 0x02, 0xe4, 0x7a, 0xc4, 0x93, 0xf7, 0xa1, - 0xc2, 0xe9, 0x44, 0xd8, 0x5b, 0x98, 0xac, 0x3d, 0x90, 0x45, 0xfe, 0xe0, 0xc3, 0xd3, 0x13, 0xea, - 0x04, 0xc3, 0x5b, 0xc2, 0x54, 0x7f, 0x7f, 0xb1, 0xd5, 0x16, 0x32, 0xbb, 0xde, 0xdc, 0xe1, 0x6c, - 0xee, 0xf3, 0x4b, 0x13, 0x75, 0x8c, 0x7f, 0x68, 0x02, 0x09, 0x32, 0x99, 0xa0, 0xd0, 0x70, 0x91, - 0xbb, 0x97, 0x52, 0xa0, 0xfd, 0xd5, 0x8c, 0xf9, 0x4d, 0x80, 0x09, 0x0d, 0xad, 0x4f, 0xa9, 0xcb, - 0x99, 0xad, 0x2c, 0xaa, 0x4f, 0x68, 0xf8, 0x33, 0x24, 0x88, 0x0a, 0x47, 0xb0, 0x17, 0x21, 0xb3, - 0xd1, 0xb4, 0x65, 0xb3, 0x3e, 0xa1, 0xe1, 0xf3, 0x90, 0xd9, 0xf1, 0xbd, 0xea, 0xaf, 0x7e, 0xaf, - 0xac, 0x1d, 0x1b, 0x79, 0x3b, 0xfe, 0x33, 0xe5, 0xc3, 0x09, 0x48, 0xfe, 0xff, 0xdf, 0xfb, 0x6f, - 0x9a, 0xa8, 0x0d, 0xb2, 0x69, 0x98, 0x1c, 0xc3, 0x6b, 0x71, 0x1c, 0x59, 0x0b, 0x8c, 0xaf, 0xc8, - 0x97, 0x5e, 0x1e, 0x7e, 0xdd, 0x8b, 0x2c, 0x39, 0x24, 0x3f, 0x86, 0xdb, 0xb9, 0x2c, 
0x10, 0x2f, - 0x58, 0x7a, 0x69, 0x32, 0xb8, 0x99, 0x4d, 0x06, 0xd1, 0x7a, 0x91, 0x25, 0xca, 0x5f, 0xc3, 0xb3, - 0xbf, 0x25, 0x0a, 0xa5, 0x34, 0x78, 0x14, 0x7d, 0x4b, 0xe3, 0xd7, 0x1a, 0x74, 0x72, 0x87, 0x21, - 0xdb, 0x50, 0x95, 0xf8, 0xa5, 0x65, 0xda, 0x51, 0xb4, 0x96, 0x3a, 0xaf, 0x14, 0x20, 0xef, 0x42, - 0x83, 0xa9, 0x9a, 0x4d, 0x5d, 0xf0, 0x66, 0xae, 0x94, 0x53, 0xf2, 0xb1, 0x18, 0xf9, 0x2e, 0xe8, - 0xb1, 0xd9, 0x72, 0xf5, 0x7a, 0x6c, 0x65, 0xa5, 0x94, 0x08, 0x1a, 0x0c, 0x9a, 0xa9, 0xed, 0xc9, - 0x37, 0x40, 0x9f, 0xd3, 0xa5, 0x2a, 0xba, 0x65, 0xb9, 0xd6, 0x98, 0xd3, 0x25, 0xd6, 0xdb, 0xe4, - 0x36, 0xd4, 0x05, 0x73, 0x42, 0xa5, 0xd1, 0xcb, 0x66, 0x6d, 0x4e, 0x97, 0x3f, 0xa2, 0x21, 0xb9, - 0x0b, 0xeb, 0xa2, 0xa9, 0xb0, 0x1c, 0x8f, 0x53, 0x4b, 0x75, 0x0b, 0x65, 0x13, 0x04, 0xed, 0xd8, - 0xe3, 0xf4, 0xe3, 0xd0, 0xd8, 0x81, 0x76, 0xf6, 0xe0, 0xd1, 0x62, 0x11, 0x44, 0xca, 0xc5, 0x0e, - 0x27, 0xcc, 0xb8, 0x0f, 0x9d, 0xdc, 0x79, 0x89, 0x01, 0x2d, 0x7f, 0x31, 0xb2, 0xce, 0xd9, 0xa5, - 0x85, 0x17, 0x42, 0x27, 0xd2, 0xcd, 0xa6, 0xbf, 0x18, 0x7d, 0xc8, 0x2e, 0x9f, 0x09, 0x92, 0xf1, - 0x14, 0xda, 0xd9, 0x82, 0x59, 0x24, 0xd1, 0xc0, 0x5b, 0xb8, 0x36, 0xae, 0x5f, 0x35, 0xe5, 0x44, - 0xf4, 0xdc, 0x17, 0x9e, 0xf4, 0x9b, 0x74, 0x85, 0x7c, 0xea, 0x71, 0x96, 0x2a, 0xb3, 0xa5, 0x8c, - 0xf1, 0xcb, 0x2a, 0xd4, 0x64, 0xf5, 0x4e, 0x06, 0xd9, 0xde, 0x50, 0x38, 0x8d, 0xd2, 0x94, 0x54, - 0xa5, 0x18, 0x03, 0xf3, 0x1b, 0xf9, 0x06, 0x6b, 0xd8, 0xbc, 0x7a, 0xb1, 0x55, 0x47, 0x50, 0x3b, - 0x7e, 0x9c, 0x74, 0x5b, 0xab, 0x9a, 0x91, 0xa8, 0xb5, 0xab, 0xbc, 0x72, 0x6b, 0x77, 0x1b, 0xea, - 0xee, 0x62, 0x6e, 0xf1, 0x65, 0xa8, 0x92, 0x43, 0xcd, 0x5d, 0xcc, 0x9f, 0x2d, 0xf1, 0xe3, 0x72, - 0x8f, 0xd3, 0x19, 0xb2, 0x64, 0x6a, 0x68, 0x20, 0x41, 0x30, 0x0f, 0xa0, 0x95, 0xc2, 0x7e, 0xc7, - 0x56, 0x35, 0x64, 0x3b, 0xed, 0xa3, 0xc7, 0x8f, 0xd5, 0x2d, 0x9b, 0x71, 0x2d, 0x70, 0x6c, 0x93, - 0xed, 0x6c, 0x27, 0x83, 0x25, 0x43, 0x03, 0x23, 0x21, 0xd5, 0xac, 0x88, 0x82, 0x41, 0x1c, 0x40, - 0xc4, 0x86, 0x14, 0xd1, 
0x51, 0xa4, 0x21, 0x08, 0xc8, 0x7c, 0x13, 0x3a, 0x09, 0xea, 0x4a, 0x11, - 0x90, 0xab, 0x24, 0x64, 0x14, 0x7c, 0x07, 0x36, 0x5c, 0xb6, 0xe4, 0x56, 0x5e, 0xba, 0x89, 0xd2, - 0x44, 0xf0, 0x4e, 0xb3, 0x1a, 0xdf, 0x86, 0x76, 0x92, 0x3d, 0x50, 0x76, 0x5d, 0xf6, 0x93, 0x31, - 0x15, 0xc5, 0xee, 0x40, 0x23, 0xae, 0x79, 0x5a, 0x28, 0x50, 0xa7, 0xb2, 0xd4, 0x89, 0xab, 0xa8, - 0x80, 0x85, 0x8b, 0x19, 0x57, 0x8b, 0xb4, 0x51, 0x06, 0xab, 0x28, 0x53, 0xd2, 0x51, 0xf6, 0x1e, - 0xb4, 0xa2, 0xa0, 0x94, 0x72, 0x1d, 0x94, 0x5b, 0x8f, 0x88, 0x28, 0xb4, 0x03, 0x5d, 0x3f, 0xf0, - 0x7c, 0x2f, 0x64, 0x81, 0x45, 0x6d, 0x3b, 0x60, 0x61, 0xd8, 0xeb, 0xca, 0xf5, 0x22, 0xfa, 0xa1, - 0x24, 0x1b, 0xef, 0x42, 0x3d, 0x2a, 0xe6, 0x36, 0xa0, 0x3a, 0x8c, 0x13, 0x48, 0xc5, 0x94, 0x13, - 0x01, 0x1b, 0x87, 0xbe, 0xaf, 0x9e, 0x24, 0xc4, 0xd0, 0xf8, 0x39, 0xd4, 0xd5, 0x07, 0x2b, 0x6c, - 0x54, 0xbf, 0x0f, 0xeb, 0x3e, 0x0d, 0xc4, 0x35, 0xd2, 0xed, 0x6a, 0xd4, 0x2e, 0x9c, 0xd0, 0x80, - 0x3f, 0x65, 0x3c, 0xd3, 0xb5, 0x36, 0x51, 0x5e, 0x92, 0x8c, 0x07, 0xd0, 0xca, 0xc8, 0x88, 0x63, - 0xa1, 0x1f, 0x45, 0x91, 0x86, 0x93, 0x78, 0xe7, 0x52, 0xb2, 0xb3, 0xf1, 0x10, 0xf4, 0xf8, 0xdb, - 0x88, 0xaa, 0x36, 0xba, 0xba, 0xa6, 0xcc, 0x2d, 0xa7, 0xd8, 0x89, 0x7b, 0x9f, 0xb2, 0x40, 0xc5, - 0x84, 0x9c, 0x18, 0xcf, 0x53, 0x99, 0x41, 0x26, 0x72, 0xb2, 0x0b, 0x75, 0x95, 0x19, 0x54, 0x54, - 0x46, 0x3d, 0xf7, 0x09, 0xa6, 0x86, 0xa8, 0xe7, 0x96, 0x89, 0x22, 0x59, 0xb6, 0x94, 0x5e, 0x76, - 0x06, 0x8d, 0x28, 0xfa, 0xb3, 0x49, 0x54, 0xae, 0xd8, 0xcd, 0x27, 0x51, 0xb5, 0x68, 0x22, 0x28, - 0xbc, 0x23, 0x74, 0x26, 0x2e, 0xb3, 0xad, 0x24, 0x84, 0x70, 0x8f, 0x86, 0xd9, 0x91, 0x8c, 0x8f, - 0xa2, 0x78, 0x31, 0xde, 0x81, 0x9a, 0x3c, 0x9b, 0xb0, 0x8f, 0x58, 0x39, 0x2a, 0xf4, 0xc5, 0xb8, - 0x10, 0x49, 0xfe, 0xa0, 0x41, 0x23, 0x4a, 0x9e, 0x85, 0x4a, 0x99, 0x43, 0x97, 0xbe, 0xea, 0xa1, - 0xff, 0xf7, 0x89, 0x67, 0x17, 0x88, 0xcc, 0x2f, 0x17, 0x1e, 0x77, 0xdc, 0x89, 0x25, 0x6d, 0x2d, - 0x73, 0x50, 0x17, 0x39, 0xa7, 0xc8, 0x38, 0x11, 0xf4, 0xfd, 
0xcf, 0xaa, 0xd0, 0x39, 0x1c, 0x3e, - 0x3a, 0x3e, 0xf4, 0xfd, 0x99, 0x33, 0xa6, 0xd8, 0x3c, 0xec, 0x41, 0x05, 0xfb, 0xa7, 0x82, 0xf7, - 0xdf, 0x7e, 0x51, 0x23, 0x4f, 0xf6, 0xa1, 0x8a, 0x6d, 0x14, 0x29, 0x7a, 0x06, 0xee, 0x17, 0xf6, - 0xf3, 0x62, 0x13, 0xd9, 0x68, 0x5d, 0x7f, 0x0d, 0xee, 0x17, 0x35, 0xf5, 0xe4, 0x07, 0xa0, 0x27, - 0xfd, 0xcd, 0xaa, 0x37, 0xe1, 0xfe, 0xca, 0xf6, 0x5e, 0xe8, 0x27, 0xb5, 0xe0, 0xaa, 0xa7, 0xcd, - 0xfe, 0xca, 0x3e, 0x98, 0x1c, 0x40, 0x3d, 0xaa, 0xa0, 0x8b, 0x5f, 0x6d, 0xfb, 0x2b, 0x5a, 0x6f, - 0x61, 0x1e, 0xd9, 0xb2, 0x14, 0x3d, 0x2d, 0xf7, 0x0b, 0xdf, 0x07, 0xc8, 0x7d, 0xa8, 0xa9, 0xb2, - 0xa6, 0xf0, 0xe5, 0xb6, 0x5f, 0xdc, 0x40, 0x8b, 0x4b, 0x26, 0x4d, 0xdb, 0xaa, 0xe7, 0xef, 0xfe, - 0xca, 0x87, 0x0c, 0x72, 0x08, 0x90, 0xea, 0x3c, 0x56, 0xbe, 0x6b, 0xf7, 0x57, 0x3f, 0x50, 0x90, - 0x87, 0xd0, 0x48, 0x1e, 0x9d, 0x8a, 0x5f, 0xaa, 0xfb, 0xab, 0xde, 0x0c, 0x86, 0xaf, 0xff, 0xeb, - 0xcf, 0x9b, 0xda, 0x6f, 0xae, 0x36, 0xb5, 0x2f, 0xae, 0x36, 0xb5, 0x2f, 0xaf, 0x36, 0xb5, 0xdf, - 0x5f, 0x6d, 0x6a, 0x7f, 0xba, 0xda, 0xd4, 0x7e, 0xfb, 0x97, 0x4d, 0x6d, 0x54, 0x43, 0xf7, 0x7f, - 0xef, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x85, 0xdc, 0x74, 0x8d, 0x99, 0x19, 0x00, 0x00, +var fileDescriptor_types_a177e47fab90f91d = []byte{ + // 2203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x73, 0x1c, 0x47, + 0xf5, 0xd7, 0xec, 0xef, 0x79, 0xab, 0xfd, 0xe1, 0xb6, 0x6c, 0xaf, 0xf7, 0x9b, 0xaf, 0xe4, 0x1a, + 0x43, 0x22, 0x11, 0x67, 0x95, 0x28, 0x98, 0x92, 0xe3, 0x40, 0x95, 0x56, 0x36, 0x48, 0x95, 0x00, + 0x62, 0x6c, 0x8b, 0x0b, 0x55, 0x53, 0xbd, 0x3b, 0xad, 0xdd, 0x29, 0xed, 0xce, 0x4c, 0x66, 0x7a, + 0x95, 0x15, 0x47, 0xce, 0x39, 0xe4, 0xc0, 0x9f, 0xc0, 0x81, 0x3f, 0x21, 0x47, 0x4e, 0x54, 0x8e, + 0x1c, 0x38, 0x1b, 0x10, 0xc5, 0x01, 0xae, 0x14, 0x55, 0x1c, 0xa9, 0x7e, 0xdd, 0xf3, 0x53, 0xb3, + 0x26, 0x0e, 0x9c, 0xb8, 0x48, 0xd3, 0xfd, 0x3e, 0xaf, 0x7f, 0xbc, 0x7d, 0xef, 0x7d, 0xde, 0x6b, + 
0xb8, 0x4d, 0x47, 0x63, 0x67, 0x97, 0x5f, 0xfa, 0x2c, 0x94, 0x7f, 0x07, 0x7e, 0xe0, 0x71, 0x8f, + 0x54, 0x71, 0xd0, 0x7f, 0x67, 0xe2, 0xf0, 0xe9, 0x62, 0x34, 0x18, 0x7b, 0xf3, 0xdd, 0x89, 0x37, + 0xf1, 0x76, 0x51, 0x3a, 0x5a, 0x9c, 0xe1, 0x08, 0x07, 0xf8, 0x25, 0xb5, 0xfa, 0x8f, 0x53, 0x70, + 0xce, 0x5c, 0x9b, 0x05, 0x73, 0xc7, 0xe5, 0xe9, 0xcf, 0x71, 0x70, 0xe9, 0x73, 0x6f, 0x77, 0xce, + 0x82, 0xf3, 0x19, 0x53, 0xff, 0x94, 0xf2, 0xfe, 0xbf, 0x55, 0x9e, 0x39, 0xa3, 0x70, 0x77, 0xec, + 0xcd, 0xe7, 0x9e, 0x9b, 0x3e, 0x6c, 0x7f, 0x6b, 0xe2, 0x79, 0x93, 0x19, 0x4b, 0x0e, 0xc7, 0x9d, + 0x39, 0x0b, 0x39, 0x9d, 0xfb, 0x12, 0x60, 0xfc, 0xb6, 0x02, 0x75, 0x93, 0x7d, 0xb2, 0x60, 0x21, + 0x27, 0xdb, 0x50, 0x61, 0xe3, 0xa9, 0xd7, 0x2b, 0xdd, 0xd3, 0xb6, 0x9b, 0x7b, 0x64, 0x20, 0x17, + 0x52, 0xd2, 0xa7, 0xe3, 0xa9, 0x77, 0xb4, 0x66, 0x22, 0x82, 0xbc, 0x0d, 0xd5, 0xb3, 0xd9, 0x22, + 0x9c, 0xf6, 0xca, 0x08, 0xbd, 0x99, 0x85, 0x7e, 0x5f, 0x88, 0x8e, 0xd6, 0x4c, 0x89, 0x11, 0xcb, + 0x3a, 0xee, 0x99, 0xd7, 0xab, 0x14, 0x2d, 0x7b, 0xec, 0x9e, 0xe1, 0xb2, 0x02, 0x41, 0xf6, 0x01, + 0x42, 0xc6, 0x2d, 0xcf, 0xe7, 0x8e, 0xe7, 0xf6, 0xaa, 0x88, 0xbf, 0x93, 0xc5, 0x3f, 0x63, 0xfc, + 0xc7, 0x28, 0x3e, 0x5a, 0x33, 0xf5, 0x30, 0x1a, 0x08, 0x4d, 0xc7, 0x75, 0xb8, 0x35, 0x9e, 0x52, + 0xc7, 0xed, 0xd5, 0x8a, 0x34, 0x8f, 0x5d, 0x87, 0x1f, 0x0a, 0xb1, 0xd0, 0x74, 0xa2, 0x81, 0xb8, + 0xca, 0x27, 0x0b, 0x16, 0x5c, 0xf6, 0xea, 0x45, 0x57, 0xf9, 0x89, 0x10, 0x89, 0xab, 0x20, 0x86, + 0x3c, 0x86, 0xe6, 0x88, 0x4d, 0x1c, 0xd7, 0x1a, 0xcd, 0xbc, 0xf1, 0x79, 0xaf, 0x81, 0x2a, 0xbd, + 0xac, 0xca, 0x50, 0x00, 0x86, 0x42, 0x7e, 0xb4, 0x66, 0xc2, 0x28, 0x1e, 0x91, 0x3d, 0x68, 0x8c, + 0xa7, 0x6c, 0x7c, 0x6e, 0xf1, 0x65, 0x4f, 0x47, 0xcd, 0x5b, 0x59, 0xcd, 0x43, 0x21, 0x7d, 0xbe, + 0x3c, 0x5a, 0x33, 0xeb, 0x63, 0xf9, 0x49, 0x1e, 0x82, 0xce, 0x5c, 0x5b, 0x6d, 0xd7, 0x44, 0xa5, + 0xdb, 0xb9, 0xdf, 0xc5, 0xb5, 0xa3, 0xcd, 0x1a, 0x4c, 0x7d, 0x93, 0x01, 0xd4, 0x84, 0x33, 0x38, + 0xbc, 0xb7, 0x8e, 0x3a, 0x1b, 0xb9, 
0x8d, 0x50, 0x76, 0xb4, 0x66, 0x2a, 0x94, 0x30, 0x9f, 0xcd, + 0x66, 0xce, 0x05, 0x0b, 0xc4, 0xe1, 0x6e, 0x16, 0x99, 0xef, 0x89, 0x94, 0xe3, 0xf1, 0x74, 0x3b, + 0x1a, 0x0c, 0xeb, 0x50, 0xbd, 0xa0, 0xb3, 0x05, 0x33, 0xde, 0x82, 0x66, 0xca, 0x53, 0x48, 0x0f, + 0xea, 0x73, 0x16, 0x86, 0x74, 0xc2, 0x7a, 0xda, 0x3d, 0x6d, 0x5b, 0x37, 0xa3, 0xa1, 0xd1, 0x86, + 0xf5, 0xb4, 0x9f, 0x18, 0xf3, 0x58, 0x51, 0xf8, 0x82, 0x50, 0xbc, 0x60, 0x41, 0x28, 0x1c, 0x40, + 0x29, 0xaa, 0x21, 0xb9, 0x0f, 0x2d, 0xb4, 0x83, 0x15, 0xc9, 0x85, 0x9f, 0x56, 0xcc, 0x75, 0x9c, + 0x3c, 0x55, 0xa0, 0x2d, 0x68, 0xfa, 0x7b, 0x7e, 0x0c, 0x29, 0x23, 0x04, 0xfc, 0x3d, 0x5f, 0x01, + 0x8c, 0x0f, 0xa0, 0x9b, 0x77, 0x25, 0xd2, 0x85, 0xf2, 0x39, 0xbb, 0x54, 0xfb, 0x89, 0x4f, 0xb2, + 0xa1, 0xae, 0x85, 0x7b, 0xe8, 0xa6, 0xba, 0xe3, 0xe7, 0xa5, 0x58, 0x39, 0xf6, 0x26, 0xb2, 0x0f, + 0x15, 0x11, 0x54, 0xa8, 0xdd, 0xdc, 0xeb, 0x0f, 0x64, 0xc4, 0x0d, 0xa2, 0x88, 0x1b, 0x3c, 0x8f, + 0x22, 0x6e, 0xd8, 0xf8, 0xf2, 0xe5, 0xd6, 0xda, 0xe7, 0x7f, 0xd8, 0xd2, 0x4c, 0xd4, 0x20, 0x77, + 0x85, 0x43, 0x50, 0xc7, 0xb5, 0x1c, 0x5b, 0xed, 0x53, 0xc7, 0xf1, 0xb1, 0x4d, 0x0e, 0xa0, 0x3b, + 0xf6, 0xdc, 0x90, 0xb9, 0xe1, 0x22, 0xb4, 0x7c, 0x1a, 0xd0, 0x79, 0xa8, 0x62, 0x2d, 0xfa, 0xf9, + 0x0f, 0x23, 0xf1, 0x09, 0x4a, 0xcd, 0xce, 0x38, 0x3b, 0x41, 0x3e, 0x04, 0xb8, 0xa0, 0x33, 0xc7, + 0xa6, 0xdc, 0x0b, 0xc2, 0x5e, 0xe5, 0x5e, 0x39, 0xa5, 0x7c, 0x1a, 0x09, 0x5e, 0xf8, 0x36, 0xe5, + 0x6c, 0x58, 0x11, 0x27, 0x33, 0x53, 0x78, 0xf2, 0x26, 0x74, 0xa8, 0xef, 0x5b, 0x21, 0xa7, 0x9c, + 0x59, 0xa3, 0x4b, 0xce, 0x42, 0x8c, 0xc7, 0x75, 0xb3, 0x45, 0x7d, 0xff, 0x99, 0x98, 0x1d, 0x8a, + 0x49, 0xc3, 0x8e, 0x7f, 0x4d, 0x0c, 0x15, 0x42, 0xa0, 0x62, 0x53, 0x4e, 0xd1, 0x1a, 0xeb, 0x26, + 0x7e, 0x8b, 0x39, 0x9f, 0xf2, 0xa9, 0xba, 0x23, 0x7e, 0x93, 0xdb, 0x50, 0x9b, 0x32, 0x67, 0x32, + 0xe5, 0x78, 0xad, 0xb2, 0xa9, 0x46, 0xc2, 0xf0, 0x7e, 0xe0, 0x5d, 0x30, 0xcc, 0x16, 0x0d, 0x53, + 0x0e, 0x8c, 0xbf, 0x68, 0x70, 0xe3, 0x5a, 0x78, 0x89, 0x75, 0xa7, 0x34, 
0x9c, 0x46, 0x7b, 0x89, + 0x6f, 0xf2, 0xb6, 0x58, 0x97, 0xda, 0x2c, 0x50, 0x59, 0xac, 0xa5, 0x6e, 0x7c, 0x84, 0x93, 0xea, + 0xa2, 0x0a, 0x42, 0x9e, 0x42, 0x77, 0x46, 0x43, 0x6e, 0xc9, 0x28, 0xb0, 0x30, 0x4b, 0x95, 0x33, + 0x91, 0xf9, 0x31, 0x8d, 0xa2, 0x45, 0x38, 0xa7, 0x52, 0x6f, 0xcf, 0x32, 0xb3, 0xe4, 0x08, 0x36, + 0x46, 0x97, 0x3f, 0xa7, 0x2e, 0x77, 0x5c, 0x66, 0x5d, 0xb3, 0x79, 0x47, 0x2d, 0xf5, 0xf4, 0xc2, + 0xb1, 0x99, 0x3b, 0x8e, 0x8c, 0x7d, 0x33, 0x56, 0x89, 0x7f, 0x8c, 0xd0, 0xb8, 0x07, 0xed, 0x6c, + 0x2e, 0x20, 0x6d, 0x28, 0xf1, 0xa5, 0xba, 0x61, 0x89, 0x2f, 0x0d, 0x23, 0xf6, 0xc0, 0x38, 0x20, + 0xaf, 0x61, 0x76, 0xa0, 0x93, 0x4b, 0x0e, 0x29, 0x73, 0x6b, 0x69, 0x73, 0x1b, 0x1d, 0x68, 0x65, + 0x72, 0x82, 0xf1, 0x59, 0x15, 0x1a, 0x26, 0x0b, 0x7d, 0xe1, 0x4c, 0x64, 0x1f, 0x74, 0xb6, 0x1c, + 0x33, 0x99, 0x8e, 0xb5, 0x5c, 0xb2, 0x93, 0x98, 0xa7, 0x91, 0x5c, 0xa4, 0x85, 0x18, 0x4c, 0x76, + 0x32, 0x54, 0x72, 0x33, 0xaf, 0x94, 0xe6, 0x92, 0x07, 0x59, 0x2e, 0xd9, 0xc8, 0x61, 0x73, 0x64, + 0xb2, 0x93, 0x21, 0x93, 0xfc, 0xc2, 0x19, 0x36, 0x79, 0x54, 0xc0, 0x26, 0xf9, 0xe3, 0xaf, 0xa0, + 0x93, 0x47, 0x05, 0x74, 0xd2, 0xbb, 0xb6, 0x57, 0x21, 0x9f, 0x3c, 0xc8, 0xf2, 0x49, 0xfe, 0x3a, + 0x39, 0x42, 0xf9, 0xb0, 0x88, 0x50, 0xee, 0xe6, 0x74, 0x56, 0x32, 0xca, 0xfb, 0xd7, 0x18, 0xe5, + 0x76, 0x4e, 0xb5, 0x80, 0x52, 0x1e, 0x65, 0x72, 0x3d, 0x14, 0xde, 0xad, 0x38, 0xd9, 0x93, 0xef, + 0x5c, 0x67, 0xa3, 0x3b, 0xf9, 0x9f, 0xb6, 0x88, 0x8e, 0x76, 0x73, 0x74, 0x74, 0x2b, 0x7f, 0xca, + 0x1c, 0x1f, 0x25, 0xac, 0xb2, 0x23, 0xe2, 0x3e, 0xe7, 0x69, 0x22, 0x47, 0xb0, 0x20, 0xf0, 0x02, + 0x95, 0xb0, 0xe5, 0xc0, 0xd8, 0x16, 0x99, 0x28, 0xf1, 0xaf, 0x57, 0x30, 0x10, 0x3a, 0x7d, 0xca, + 0xbb, 0x8c, 0x2f, 0xb4, 0x44, 0x17, 0x23, 0x3a, 0x9d, 0xc5, 0x74, 0x95, 0xc5, 0x52, 0xc4, 0x54, + 0xca, 0x12, 0xd3, 0x16, 0x34, 0x45, 0xae, 0xcc, 0x71, 0x0e, 0xf5, 0x23, 0xce, 0x21, 0xdf, 0x82, + 0x1b, 0x98, 0x67, 0x24, 0x7d, 0xa9, 0x40, 0xac, 0x60, 0x20, 0x76, 0x84, 0x40, 0x5a, 0x4c, 0x26, + 0xc0, 0x77, 
0xe0, 0x66, 0x0a, 0x2b, 0xd6, 0xc5, 0x1c, 0x27, 0x93, 0x6f, 0x37, 0x46, 0x1f, 0xf8, + 0xfe, 0x11, 0x0d, 0xa7, 0xc6, 0x0f, 0x13, 0x03, 0x25, 0x7c, 0x46, 0xa0, 0x32, 0xf6, 0x6c, 0x79, + 0xef, 0x96, 0x89, 0xdf, 0x82, 0xe3, 0x66, 0xde, 0x04, 0x0f, 0xa7, 0x9b, 0xe2, 0x53, 0xa0, 0xe2, + 0x50, 0xd2, 0x65, 0xcc, 0x18, 0xbf, 0xd4, 0x92, 0xf5, 0x12, 0x8a, 0x2b, 0x62, 0x23, 0xed, 0x3f, + 0x61, 0xa3, 0xd2, 0xeb, 0xb1, 0x91, 0x71, 0xa5, 0x25, 0x3f, 0x59, 0xcc, 0x33, 0x5f, 0xef, 0x8a, + 0xc2, 0x7b, 0x1c, 0xd7, 0x66, 0x4b, 0x34, 0x69, 0xd9, 0x94, 0x83, 0xa8, 0x04, 0xa8, 0xa1, 0x99, + 0xb3, 0x25, 0x40, 0x1d, 0xe7, 0xe4, 0x80, 0xdc, 0x47, 0x7e, 0xf2, 0xce, 0x54, 0xa8, 0xb6, 0x06, + 0xaa, 0x50, 0x3f, 0x11, 0x93, 0xa6, 0x94, 0xa5, 0xb2, 0xad, 0x9e, 0x21, 0xb7, 0x37, 0x40, 0x17, + 0x07, 0x0d, 0x7d, 0x3a, 0x66, 0x18, 0x79, 0xba, 0x99, 0x4c, 0x18, 0x27, 0x40, 0xae, 0x47, 0x3c, + 0xf9, 0x00, 0x2a, 0x9c, 0x4e, 0x84, 0xbd, 0x85, 0xc9, 0xda, 0x03, 0x59, 0xe4, 0x0f, 0x3e, 0x3a, + 0x3d, 0xa1, 0x4e, 0x30, 0xbc, 0x2d, 0x4c, 0xf5, 0xb7, 0x97, 0x5b, 0x6d, 0x81, 0x79, 0xe0, 0xcd, + 0x1d, 0xce, 0xe6, 0x3e, 0xbf, 0x34, 0x51, 0xc7, 0xf8, 0xbb, 0x26, 0x98, 0x20, 0x93, 0x09, 0x0a, + 0x0d, 0x17, 0xb9, 0x7b, 0x29, 0x45, 0xda, 0x5f, 0xcd, 0x98, 0xff, 0x0f, 0x30, 0xa1, 0xa1, 0xf5, + 0x29, 0x75, 0x39, 0xb3, 0x95, 0x45, 0xf5, 0x09, 0x0d, 0x7f, 0x8a, 0x13, 0xa2, 0xc2, 0x11, 0xe2, + 0x45, 0xc8, 0x6c, 0x34, 0x6d, 0xd9, 0xac, 0x4f, 0x68, 0xf8, 0x22, 0x64, 0x76, 0x7c, 0xaf, 0xfa, + 0xeb, 0xdf, 0x2b, 0x6b, 0xc7, 0x46, 0xde, 0x8e, 0xff, 0x48, 0xf9, 0x70, 0x42, 0x92, 0xff, 0xfb, + 0xf7, 0xfe, 0xab, 0x26, 0x6a, 0x83, 0x6c, 0x1a, 0x26, 0xc7, 0x70, 0x23, 0x8e, 0x23, 0x6b, 0x81, + 0xf1, 0x15, 0xf9, 0xd2, 0xab, 0xc3, 0xaf, 0x7b, 0x91, 0x9d, 0x0e, 0xc9, 0x8f, 0xe0, 0x4e, 0x2e, + 0x0b, 0xc4, 0x0b, 0x96, 0x5e, 0x99, 0x0c, 0x6e, 0x65, 0x93, 0x41, 0xb4, 0x5e, 0x64, 0x89, 0xf2, + 0xd7, 0xf0, 0xec, 0x6f, 0x88, 0x42, 0x29, 0x4d, 0x1e, 0x45, 0xbf, 0xa5, 0xf1, 0x2b, 0x0d, 0x3a, + 0xb9, 0xc3, 0x90, 0x6d, 0xa8, 0x4a, 0xfe, 0xd2, 
0x32, 0xed, 0x28, 0x5a, 0x4b, 0x9d, 0x57, 0x02, + 0xc8, 0x7b, 0xd0, 0x60, 0xaa, 0x66, 0x53, 0x17, 0xbc, 0x95, 0x2b, 0xe5, 0x14, 0x3e, 0x86, 0x91, + 0x6f, 0x83, 0x1e, 0x9b, 0x2d, 0x57, 0xaf, 0xc7, 0x56, 0x56, 0x4a, 0x09, 0xd0, 0x38, 0x84, 0x66, + 0x6a, 0x7b, 0xf2, 0x7f, 0xa0, 0xcf, 0xe9, 0x52, 0x15, 0xdd, 0xb2, 0x5c, 0x6b, 0xcc, 0xe9, 0x12, + 0xeb, 0x6d, 0x72, 0x07, 0xea, 0x42, 0x38, 0xa1, 0xd2, 0xe8, 0x65, 0xb3, 0x36, 0xa7, 0xcb, 0x1f, + 0xd0, 0xd0, 0xd8, 0x81, 0x76, 0xf6, 0x58, 0x11, 0x34, 0x22, 0x40, 0x09, 0x3d, 0x98, 0x30, 0xe3, + 0x21, 0x74, 0x72, 0xa7, 0x21, 0x06, 0xb4, 0xfc, 0xc5, 0xc8, 0x3a, 0x67, 0x97, 0x16, 0x1e, 0x17, + 0x5d, 0x44, 0x37, 0x9b, 0xfe, 0x62, 0xf4, 0x11, 0xbb, 0x7c, 0x2e, 0xa6, 0x8c, 0x67, 0xd0, 0xce, + 0x96, 0xc3, 0x22, 0x45, 0x06, 0xde, 0xc2, 0xb5, 0x71, 0xfd, 0xaa, 0x29, 0x07, 0xa2, 0xa3, 0xbe, + 0xf0, 0xa4, 0x57, 0xa4, 0xeb, 0xdf, 0x53, 0x8f, 0xb3, 0x54, 0x11, 0x2d, 0x31, 0xc6, 0x2f, 0xaa, + 0x50, 0x93, 0xb5, 0x39, 0x19, 0x64, 0x3b, 0x3f, 0xe1, 0x12, 0x4a, 0x53, 0xce, 0x2a, 0xc5, 0x98, + 0x76, 0xdf, 0xcc, 0xb7, 0x4f, 0xc3, 0xe6, 0xd5, 0xcb, 0xad, 0x3a, 0x52, 0xd6, 0xf1, 0x93, 0xa4, + 0x97, 0x5a, 0xd5, 0x6a, 0x44, 0x8d, 0x5b, 0xe5, 0xb5, 0x1b, 0xb7, 0x3b, 0x50, 0x77, 0x17, 0x73, + 0x8b, 0x2f, 0x43, 0x15, 0xfa, 0x35, 0x77, 0x31, 0x7f, 0xbe, 0xc4, 0x9f, 0x8e, 0x7b, 0x9c, 0xce, + 0x50, 0x24, 0x03, 0xbf, 0x81, 0x13, 0x42, 0xb8, 0x0f, 0xad, 0x14, 0xb3, 0x3b, 0xb6, 0xaa, 0x10, + 0xdb, 0x69, 0x0f, 0x3c, 0x7e, 0xa2, 0x6e, 0xd9, 0x8c, 0x99, 0xfe, 0xd8, 0x26, 0xdb, 0xd9, 0x3e, + 0x05, 0x0b, 0x82, 0x06, 0xfa, 0x79, 0xaa, 0x15, 0x11, 0xe5, 0x80, 0x38, 0x80, 0xf0, 0x7c, 0x09, + 0xd1, 0x11, 0xd2, 0x10, 0x13, 0x28, 0x7c, 0x0b, 0x3a, 0x09, 0xa7, 0x4a, 0x08, 0xc8, 0x55, 0x92, + 0x69, 0x04, 0xbe, 0x0b, 0x1b, 0x2e, 0x5b, 0x72, 0x2b, 0x8f, 0x6e, 0x22, 0x9a, 0x08, 0xd9, 0x69, + 0x56, 0xe3, 0x9b, 0xd0, 0x4e, 0x72, 0x03, 0x62, 0xd7, 0x65, 0xb7, 0x18, 0xcf, 0x22, 0xec, 0x2e, + 0x34, 0xe2, 0x8a, 0xa6, 0x85, 0x80, 0x3a, 0x95, 0x85, 0x4c, 0x5c, 0x23, 0x05, 0x2c, 
0x5c, 0xcc, + 0xb8, 0x5a, 0xa4, 0x8d, 0x18, 0xac, 0x91, 0x4c, 0x39, 0x8f, 0xd8, 0xfb, 0xd0, 0x8a, 0x42, 0x4e, + 0xe2, 0x3a, 0x88, 0x5b, 0x8f, 0x26, 0x11, 0xb4, 0x03, 0x5d, 0x3f, 0xf0, 0x7c, 0x2f, 0x64, 0x81, + 0x45, 0x6d, 0x3b, 0x60, 0x61, 0xd8, 0xeb, 0xca, 0xf5, 0xa2, 0xf9, 0x03, 0x39, 0x6d, 0xbc, 0x07, + 0xf5, 0xa8, 0x54, 0xdb, 0x80, 0xea, 0x30, 0x4e, 0x0f, 0x15, 0x53, 0x0e, 0x04, 0x29, 0x1c, 0xf8, + 0xbe, 0x7a, 0x70, 0x10, 0x9f, 0xc6, 0xcf, 0xa0, 0xae, 0x7e, 0xb0, 0xc2, 0x36, 0xf4, 0xbb, 0xb0, + 0xee, 0xd3, 0x40, 0x5c, 0x23, 0xdd, 0x8c, 0x46, 0xcd, 0xc0, 0x09, 0x0d, 0xf8, 0x33, 0xc6, 0x33, + 0x3d, 0x69, 0x13, 0xf1, 0x72, 0xca, 0x78, 0x04, 0xad, 0x0c, 0x46, 0x1c, 0x0b, 0xfd, 0x28, 0x8a, + 0x34, 0x1c, 0xc4, 0x3b, 0x97, 0x92, 0x9d, 0x8d, 0xc7, 0xa0, 0xc7, 0xbf, 0x8d, 0xa8, 0x59, 0xa3, + 0xab, 0x6b, 0xca, 0xdc, 0x72, 0x88, 0x7d, 0xb6, 0xf7, 0x29, 0x0b, 0x54, 0x4c, 0xc8, 0x81, 0xf1, + 0x22, 0x95, 0x19, 0x64, 0x9a, 0x26, 0x0f, 0xa0, 0xae, 0x32, 0x83, 0x8a, 0xca, 0xa8, 0xa3, 0x3e, + 0xc1, 0xd4, 0x10, 0x75, 0xd4, 0x32, 0x51, 0x24, 0xcb, 0x96, 0xd2, 0xcb, 0xce, 0xa0, 0x11, 0x45, + 0x7f, 0x36, 0x45, 0xca, 0x15, 0xbb, 0xf9, 0x14, 0xa9, 0x16, 0x4d, 0x80, 0xc2, 0x3b, 0x42, 0x67, + 0xe2, 0x32, 0xdb, 0x4a, 0x42, 0x08, 0xf7, 0x68, 0x98, 0x1d, 0x29, 0xf8, 0x38, 0x8a, 0x17, 0xe3, + 0x5d, 0xa8, 0xc9, 0xb3, 0x09, 0xfb, 0x88, 0x95, 0xa3, 0x32, 0x5e, 0x7c, 0x17, 0xf2, 0xc4, 0xef, + 0x35, 0x68, 0x44, 0xc9, 0xb3, 0x50, 0x29, 0x73, 0xe8, 0xd2, 0x57, 0x3d, 0xf4, 0x7f, 0x3f, 0xf1, + 0x3c, 0x00, 0x22, 0xf3, 0xcb, 0x85, 0xc7, 0x1d, 0x77, 0x62, 0x49, 0x5b, 0xcb, 0x1c, 0xd4, 0x45, + 0xc9, 0x29, 0x0a, 0x4e, 0xc4, 0xfc, 0xde, 0x67, 0x55, 0xe8, 0x1c, 0x0c, 0x0f, 0x8f, 0x0f, 0x7c, + 0x7f, 0xe6, 0x8c, 0x29, 0xb6, 0x06, 0xbb, 0x50, 0xc1, 0xee, 0xa8, 0xe0, 0x75, 0xb7, 0x5f, 0xd4, + 0xa6, 0x93, 0x3d, 0xa8, 0x62, 0x93, 0x44, 0x8a, 0x1e, 0x79, 0xfb, 0x85, 0xdd, 0xba, 0xd8, 0x44, + 0xb6, 0x51, 0xd7, 0xdf, 0x7a, 0xfb, 0x45, 0x2d, 0x3b, 0xf9, 0x1e, 0xe8, 0x49, 0xf7, 0xb2, 0xea, + 0xc5, 0xb7, 0xbf, 0xb2, 
0x79, 0x17, 0xfa, 0x49, 0xa5, 0xb7, 0xea, 0xe1, 0xb2, 0xbf, 0xb2, 0xcb, + 0x25, 0xfb, 0x50, 0x8f, 0xea, 0xe3, 0xe2, 0x37, 0xd9, 0xfe, 0x8a, 0xc6, 0x5a, 0x98, 0x47, 0x36, + 0x24, 0x45, 0x0f, 0xc7, 0xfd, 0xc2, 0xee, 0x9f, 0x3c, 0x84, 0x9a, 0x2a, 0x5a, 0x0a, 0xdf, 0x65, + 0xfb, 0xc5, 0xed, 0xb1, 0xb8, 0x64, 0xd2, 0x92, 0xad, 0x7a, 0xdc, 0xee, 0xaf, 0x7c, 0xa6, 0x20, + 0x07, 0x00, 0xa9, 0xbe, 0x62, 0xe5, 0xab, 0x75, 0x7f, 0xf5, 0xf3, 0x03, 0x79, 0x0c, 0x8d, 0xe4, + 0x49, 0xa9, 0xf8, 0x1d, 0xba, 0xbf, 0xea, 0x45, 0x60, 0xf8, 0xc6, 0x3f, 0xff, 0xb4, 0xa9, 0xfd, + 0xfa, 0x6a, 0x53, 0xfb, 0xe2, 0x6a, 0x53, 0xfb, 0xf2, 0x6a, 0x53, 0xfb, 0xdd, 0xd5, 0xa6, 0xf6, + 0xc7, 0xab, 0x4d, 0xed, 0x37, 0x7f, 0xde, 0xd4, 0x46, 0x35, 0x74, 0xff, 0xf7, 0xff, 0x15, 0x00, + 0x00, 0xff, 0xff, 0x38, 0x2d, 0x52, 0x86, 0x77, 0x19, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index 86f5bbc5..8eeecb39 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -218,8 +218,6 @@ message BlockParams { int64 max_bytes = 1; // Note: must be greater or equal to -1 int64 max_gas = 2; - // Note: must be greater than 0 - int64 time_iota_ms = 3; } // EvidenceParams contains limits on the evidence. diff --git a/consensus/replay.go b/consensus/replay.go index 0d75561b..6656da62 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -324,7 +324,12 @@ func (h *Handshaker) ReplayBlocks( } if res.ConsensusParams != nil { - state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams) + // Preserve TimeIotaMs since it's not exposed to the application. 
+ timeIotaMs := state.ConsensusParams.Block.TimeIotaMs + { + state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams) + } + state.ConsensusParams.Block.TimeIotaMs = timeIotaMs } sm.SaveState(h.stateDB, state) } diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 6f624a56..c696c938 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -456,8 +456,6 @@ Commit are included in the header of the next block. - NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! - - `TimeIotaMs (int64)`: Minimum time increment between consecutive blocks (in milliseconds). - ### EvidenceParams diff --git a/state/state_test.go b/state/state_test.go index ff8eed02..eddbe255 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -992,12 +992,11 @@ func TestApplyUpdates(t *testing.T) { 2: {initParams, abci.ConsensusParams{ Block: &abci.BlockParams{ - MaxBytes: 44, - MaxGas: 55, - TimeIotaMs: 66, + MaxBytes: 44, + MaxGas: 55, }, }, - makeParams(44, 55, 66, 4)}, + makeParams(44, 55, 3, 4)}, 3: {initParams, abci.ConsensusParams{ Evidence: &abci.EvidenceParams{ diff --git a/types/params.go b/types/params.go index ce9a6bc6..162aaead 100644 --- a/types/params.go +++ b/types/params.go @@ -36,6 +36,7 @@ type BlockParams struct { MaxBytes int64 `json:"max_bytes"` MaxGas int64 `json:"max_gas"` // Minimum time increment between consecutive blocks (in milliseconds) + // Not exposed to the application. 
TimeIotaMs int64 `json:"time_iota_ms"` } @@ -169,7 +170,6 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar if params2.Block != nil { res.Block.MaxBytes = params2.Block.MaxBytes res.Block.MaxGas = params2.Block.MaxGas - res.Block.TimeIotaMs = params2.Block.TimeIotaMs } if params2.Evidence != nil { res.Evidence.MaxAge = params2.Evidence.MaxAge diff --git a/types/params_test.go b/types/params_test.go index ade7c89f..1f2a3512 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -111,18 +111,17 @@ func TestConsensusParamsUpdate(t *testing.T) { makeParams(1, 2, 10, 3, valEd25519), &abci.ConsensusParams{ Block: &abci.BlockParams{ - MaxBytes: 100, - MaxGas: 200, - TimeIotaMs: 300, + MaxBytes: 100, + MaxGas: 200, }, Evidence: &abci.EvidenceParams{ - MaxAge: 400, + MaxAge: 300, }, Validator: &abci.ValidatorParams{ PubKeyTypes: valSecp256k1, }, }, - makeParams(100, 200, 300, 400, valSecp256k1), + makeParams(100, 200, 10, 300, valSecp256k1), }, } for _, tc := range testCases { diff --git a/types/protobuf.go b/types/protobuf.go index 81b13874..8cad4608 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -126,9 +126,8 @@ func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { return &abci.ConsensusParams{ Block: &abci.BlockParams{ - MaxBytes: params.Block.MaxBytes, - MaxGas: params.Block.MaxGas, - TimeIotaMs: params.Block.TimeIotaMs, + MaxBytes: params.Block.MaxBytes, + MaxGas: params.Block.MaxGas, }, Evidence: &abci.EvidenceParams{ MaxAge: params.Evidence.MaxAge, @@ -223,14 +222,17 @@ func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) } func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams { - params := ConsensusParams{} + params := ConsensusParams{ + Block: BlockParams{}, + Evidence: EvidenceParams{}, + Validator: ValidatorParams{}, + } // we must defensively consider any structs may 
be nil if csp.Block != nil { params.Block = BlockParams{ - MaxBytes: csp.Block.MaxBytes, - MaxGas: csp.Block.MaxGas, - TimeIotaMs: csp.Block.TimeIotaMs, + MaxBytes: csp.Block.MaxBytes, + MaxGas: csp.Block.MaxGas, } } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 40859d9e..2e29a502 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -65,6 +65,8 @@ func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() abciCP := TM2PB.ConsensusParams(cp) cp2 := PB2TM.ConsensusParams(abciCP) + // TimeIotaMs is not exposed to the application. + cp2.Block.TimeIotaMs = cp.Block.TimeIotaMs assert.Equal(t, *cp, cp2) } From d741c7b4785c36cef4b0912900b6193db21d00e6 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 11 Mar 2019 22:45:58 +0400 Subject: [PATCH 14/41] limit number of /subscribe clients and queries per client (#3269) * limit number of /subscribe clients and queries per client Add the following config variables (under [rpc] section): * max_subscription_clients * max_subscriptions_per_client * timeout_broadcast_tx_commit Fixes #2826 new HTTPClient interface for subscriptions finalize HTTPClient events interface remove EventSubscriber fix data race ``` WARNING: DATA RACE Read at 0x00c000a36060 by goroutine 129: github.com/tendermint/tendermint/rpc/client.(*Local).Subscribe.func1() /go/src/github.com/tendermint/tendermint/rpc/client/localclient.go:168 +0x1f0 Previous write at 0x00c000a36060 by goroutine 132: github.com/tendermint/tendermint/rpc/client.(*Local).Subscribe() /go/src/github.com/tendermint/tendermint/rpc/client/localclient.go:191 +0x4e0 github.com/tendermint/tendermint/rpc/client.WaitForOneEvent() /go/src/github.com/tendermint/tendermint/rpc/client/helpers.go:64 +0x178 github.com/tendermint/tendermint/rpc/client_test.TestTxEventsSentWithBroadcastTxSync.func1() /go/src/github.com/tendermint/tendermint/rpc/client/event_test.go:139 +0x298 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 Goroutine 
129 (running) created at: github.com/tendermint/tendermint/rpc/client.(*Local).Subscribe() /go/src/github.com/tendermint/tendermint/rpc/client/localclient.go:164 +0x4b7 github.com/tendermint/tendermint/rpc/client.WaitForOneEvent() /go/src/github.com/tendermint/tendermint/rpc/client/helpers.go:64 +0x178 github.com/tendermint/tendermint/rpc/client_test.TestTxEventsSentWithBroadcastTxSync.func1() /go/src/github.com/tendermint/tendermint/rpc/client/event_test.go:139 +0x298 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 Goroutine 132 (running) created at: testing.(*T).Run() /usr/local/go/src/testing/testing.go:878 +0x659 github.com/tendermint/tendermint/rpc/client_test.TestTxEventsSentWithBroadcastTxSync() /go/src/github.com/tendermint/tendermint/rpc/client/event_test.go:119 +0x186 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 ================== ``` lite client works (tested manually) godoc comments httpclient: do not close the out channel use TimeoutBroadcastTxCommit no timeout for unsubscribe but 1s Local (5s HTTP) timeout for resubscribe format code change Subscribe#out cap to 1 and replace config vars with RPCConfig TimeoutBroadcastTxCommit can't be greater than rpcserver.WriteTimeout rpc: Context as first parameter to all functions reformat code fixes after my own review fixes after Ethan's review add test stubs fix config.toml * fixes after manual testing - rpc: do not recommend to use BroadcastTxCommit because it's slow and wastes Tendermint resources (pubsub) - rpc: better error in Subscribe and BroadcastTxCommit - HTTPClient: do not resubscribe if err = ErrAlreadySubscribed * fixes after Ismail's review * Update rpc/grpc/grpc_test.go Co-Authored-By: melekes --- CHANGELOG_PENDING.md | 4 +- config/config.go | 30 ++++ config/toml.go | 13 ++ docs/tendermint-core/configuration.md | 13 ++ libs/pubsub/pubsub.go | 14 ++ libs/pubsub/pubsub_test.go | 4 + lite/proxy/proxy.go | 16 ++- lite/proxy/wrapper.go | 53 ++++++++ node/node.go | 
13 +- rpc/client/event_test.go | 6 + rpc/client/helpers.go | 8 +- rpc/client/httpclient.go | 155 +++++++++++---------- rpc/client/interface.go | 15 +- rpc/client/localclient.go | 188 +++++++++++++++++++------- rpc/client/mock/client.go | 29 ++-- rpc/core/abci.go | 5 +- rpc/core/blocks.go | 9 +- rpc/core/consensus.go | 9 +- rpc/core/dev.go | 13 +- rpc/core/events.go | 61 +++++---- rpc/core/health.go | 3 +- rpc/core/mempool.go | 44 ++++-- rpc/core/net.go | 9 +- rpc/core/pipe.go | 8 ++ rpc/core/status.go | 3 +- rpc/core/tx.go | 5 +- rpc/grpc/api.go | 7 +- rpc/grpc/client_server.go | 3 +- rpc/grpc/grpc_test.go | 7 +- rpc/lib/rpc_test.go | 10 +- rpc/lib/server/handlers.go | 97 ++++++------- rpc/lib/server/handlers_test.go | 4 +- rpc/lib/server/parse_test.go | 90 ++++++------ rpc/lib/test/main.go | 4 +- rpc/lib/types/types.go | 44 ++++-- types/event_bus.go | 11 ++ 36 files changed, 657 insertions(+), 350 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 29400929..a8998c99 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -7,7 +7,7 @@ Special thanks to external contributors on this release: ### BREAKING CHANGES: * CLI/RPC/Config -- [httpclient] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) +- [rpc/client] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) * Apps @@ -27,6 +27,7 @@ Special thanks to external contributors on this release: - [config] \#2920 Remove `consensus.blocktime_iota` parameter - [genesis] \#2920 Add `time_iota_ms` to block's consensus parameters (not exposed to the application) - [genesis] \#2920 Rename `consensus_params.block_size` to `consensus_params.block` +- [lite] add `/unsubscribe_all` endpoint, which allows you to unsubscribe from all events ### IMPROVEMENTS: - [libs/common] \#3238 
exit with zero (0) code upon receiving SIGTERM/SIGINT @@ -41,7 +42,6 @@ Special thanks to external contributors on this release: - leveldb.aliveiters ### BUG FIXES: - - [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection - [libs/pubsub] \#951, \#1880 use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) - [p2p] \#3369 do not panic when filter times out diff --git a/config/config.go b/config/config.go index cfd76060..540012a5 100644 --- a/config/config.go +++ b/config/config.go @@ -7,6 +7,7 @@ import ( "time" "github.com/pkg/errors" + rpcserver "github.com/tendermint/tendermint/rpc/lib/server" ) const ( @@ -323,6 +324,19 @@ type RPCConfig struct { // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} // 1024 - 40 - 10 - 50 = 924 = ~900 MaxOpenConnections int `mapstructure:"max_open_connections"` + + // Maximum number of unique clientIDs that can /subscribe + // If you're using /broadcast_tx_commit, set to the estimated maximum number + // of broadcast_tx_commit calls per block. + MaxSubscriptionClients int `mapstructure:"max_subscription_clients"` + + // Maximum number of unique queries a given client can /subscribe to + // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set + // to the estimated maximum number of broadcast_tx_commit calls per block. 
+ MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"` + + // How long to wait for a tx to be committed during /broadcast_tx_commit + TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"` } // DefaultRPCConfig returns a default configuration for the RPC server @@ -337,6 +351,10 @@ func DefaultRPCConfig() *RPCConfig { Unsafe: false, MaxOpenConnections: 900, + + MaxSubscriptionClients: 100, + MaxSubscriptionsPerClient: 5, + TimeoutBroadcastTxCommit: 10 * time.Second, } } @@ -358,6 +376,18 @@ func (cfg *RPCConfig) ValidateBasic() error { if cfg.MaxOpenConnections < 0 { return errors.New("max_open_connections can't be negative") } + if cfg.MaxSubscriptionClients < 0 { + return errors.New("max_subscription_clients can't be negative") + } + if cfg.MaxSubscriptionsPerClient < 0 { + return errors.New("max_subscriptions_per_client can't be negative") + } + if cfg.TimeoutBroadcastTxCommit < 0 { + return errors.New("timeout_broadcast_tx_commit can't be negative") + } + if cfg.TimeoutBroadcastTxCommit > rpcserver.WriteTimeout { + return fmt.Errorf("timeout_broadcast_tx_commit can't be greater than rpc server's write timeout: %v", rpcserver.WriteTimeout) + } return nil } diff --git a/config/toml.go b/config/toml.go index 45b9a671..9ce7e76c 100644 --- a/config/toml.go +++ b/config/toml.go @@ -165,6 +165,19 @@ unsafe = {{ .RPC.Unsafe }} # 1024 - 40 - 10 - 50 = 924 = ~900 max_open_connections = {{ .RPC.MaxOpenConnections }} +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = {{ .RPC.MaxSubscriptionClients }} + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. 
+max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" + ##### peer to peer configuration options ##### [p2p] diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index 4e188aae..f1ac753a 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -111,6 +111,19 @@ unsafe = false # 1024 - 40 - 10 - 50 = 924 = ~900 max_open_connections = 900 +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +timeout_broadcast_tx_commit = "10s" + ##### peer to peer configuration options ##### [p2p] diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 8d4d1fb0..f78dac1b 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -241,6 +241,20 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { } } +// NumClients returns the number of clients. +func (s *Server) NumClients() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + return len(s.subscriptions) +} + +// NumClientSubscriptions returns the number of subscriptions the client has. +func (s *Server) NumClientSubscriptions(clientID string) int { + s.mtx.RLock() + defer s.mtx.RUnlock() + return len(s.subscriptions[clientID]) +} + // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. 
func (s *Server) Publish(ctx context.Context, msg interface{}) error { diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index e2bd50e6..88447756 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -29,6 +29,10 @@ func TestSubscribe(t *testing.T) { ctx := context.Background() subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) + + assert.Equal(t, 1, s.NumClients()) + assert.Equal(t, 1, s.NumClientSubscriptions(clientID)) + err = s.Publish(ctx, "Ka-Zar") require.NoError(t, err) assertReceive(t, "Ka-Zar", subscription.Out()) diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 39baf5a4..020e5753 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "net/http" amino "github.com/tendermint/go-amino" @@ -34,7 +35,12 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpe mux := http.NewServeMux() rpcserver.RegisterRPCFuncs(mux, r, cdc, logger) - wm := rpcserver.NewWebsocketManager(r, cdc, rpcserver.EventSubscriber(c)) + unsubscribeFromAllEvents := func(remoteAddr string) { + if err := c.UnsubscribeAll(context.Background(), remoteAddr); err != nil { + logger.Error("Failed to unsubscribe from events", "err", err) + } + } + wm := rpcserver.NewWebsocketManager(r, cdc, rpcserver.OnDisconnect(unsubscribeFromAllEvents)) wm.SetLogger(logger) core.SetLogger(logger) mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) @@ -51,13 +57,11 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpe // // if we want security, the client must implement it as a secure client func RPCRoutes(c rpcclient.Client) map[string]*rpcserver.RPCFunc { - return map[string]*rpcserver.RPCFunc{ // Subscribe/unsubscribe are reserved for websocket events. 
- // We can just use the core tendermint impl, which uses the - // EventSwitch we registered in NewWebsocketManager above - "subscribe": rpcserver.NewWSRPCFunc(core.Subscribe, "query"), - "unsubscribe": rpcserver.NewWSRPCFunc(core.Unsubscribe, "query"), + "subscribe": rpcserver.NewWSRPCFunc(c.(Wrapper).SubscribeWS, "query"), + "unsubscribe": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeWS, "query"), + "unsubscribe_all": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeAllWS, ""), // info API "status": rpcserver.NewRPCFunc(c.Status, ""), diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index c90cdb27..2d333e9f 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -1,12 +1,16 @@ package proxy import ( + "context" + "fmt" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/lite" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) var _ rpcclient.Client = Wrapper{} @@ -149,6 +153,55 @@ func (w Wrapper) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { w.prt.RegisterOpDecoder(typ, dec) } +// SubscribeWS subscribes for events using the given query and remote address as +// a subscriber, but does not verify responses (UNSAFE)! +func (w Wrapper) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { + out, err := w.Client.Subscribe(context.Background(), ctx.RemoteAddr(), query) + if err != nil { + return nil, err + } + + go func() { + for { + select { + case resultEvent := <-out: + // XXX(melekes) We should have a switch here that performs a validation + // depending on the event's type. 
+ ctx.WSConn.TryWriteRPCResponse( + rpctypes.NewRPCSuccessResponse( + ctx.WSConn.Codec(), + rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)), + resultEvent, + )) + case <-w.Client.Quit(): + return + } + } + }() + + return &ctypes.ResultSubscribe{}, nil +} + +// UnsubscribeWS calls original client's Unsubscribe using remote address as a +// subscriber. +func (w Wrapper) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { + err := w.Client.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsubscribe{}, nil +} + +// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address +// as a subscriber. +func (w Wrapper) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { + err := w.Client.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsubscribe{}, nil +} + // // WrappedSwitch creates a websocket connection that auto-verifies any info // // coming through before passing it along. 
// // diff --git a/node/node.go b/node/node.go index 2b803502..f3f9dca3 100644 --- a/node/node.go +++ b/node/node.go @@ -26,6 +26,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" @@ -658,6 +659,7 @@ func (n *Node) ConfigureRPC() { rpccore.SetConsensusReactor(n.consensusReactor) rpccore.SetEventBus(n.eventBus) rpccore.SetLogger(n.Logger.With("module", "rpc")) + rpccore.SetConfig(*n.config.RPC) } func (n *Node) startRPC() ([]net.Listener, error) { @@ -675,8 +677,15 @@ func (n *Node) startRPC() ([]net.Listener, error) { for i, listenAddr := range listenAddrs { mux := http.NewServeMux() rpcLogger := n.Logger.With("module", "rpc-server") - wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus)) - wm.SetLogger(rpcLogger.With("protocol", "websocket")) + wmLogger := rpcLogger.With("protocol", "websocket") + wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, + rpcserver.OnDisconnect(func(remoteAddr string) { + err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != tmpubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + })) + wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger) diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 7b00d6ea..b0a40fc2 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -129,3 +129,9 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) { }) } } + +// Test HTTPClient resubscribes upon disconnect && subscription error. 
+// Test Local client resubscribes upon subscription error. +func TestClientsResubscribe(t *testing.T) { + // TODO(melekes) +} diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index ec63fb3b..4889b074 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -61,7 +61,7 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type defer cancel() // register for the next event of this type - sub, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(evtTyp)) + eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(evtTyp).String()) if err != nil { return nil, errors.Wrap(err, "failed to subscribe") } @@ -69,10 +69,8 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type defer c.UnsubscribeAll(ctx, subscriber) select { - case msg := <-sub.Out(): - return msg.Data().(types.TMEventData), nil - case <-sub.Cancelled(): - return nil, errors.New("subscription was cancelled") + case event := <-eventCh: + return event.Data.(types.TMEventData), nil case <-ctx.Done(): return nil, errors.New("timed out waiting for event") } diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index a1dee991..e982292e 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -2,11 +2,14 @@ package client import ( "context" + "strings" "sync" + "time" "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -15,13 +18,18 @@ import ( ) /* -HTTP is a Client implementation that communicates -with a tendermint node over json rpc and websockets. +HTTP is a Client implementation that communicates with a tendermint node over +json rpc and websockets. -This is the main implementation you probably want to use in -production code. 
There are other implementations when calling -the tendermint node in-process (local), or when you want to mock -out the server for test code (mock). +This is the main implementation you probably want to use in production code. +There are other implementations when calling the tendermint node in-process +(Local), or when you want to mock out the server for test code (mock). + +You can subscribe for any event published by Tendermint using Subscribe method. +Note delivery is best-effort. If you don't read events fast enough or network +is slow, Tendermint might cancel the subscription. The client will attempt to +resubscribe (you don't need to do anything). It will keep trying every second +indefinitely until successful. */ type HTTP struct { remote string @@ -249,28 +257,6 @@ func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) { /** websocket event stuff here... **/ -type subscription struct { - out chan tmpubsub.Message - cancelled chan struct{} - - mtx sync.RWMutex - err error -} - -func (s *subscription) Out() <-chan tmpubsub.Message { - return s.out -} - -func (s *subscription) Cancelled() <-chan struct{} { - return s.cancelled -} - -func (s *subscription) Err() error { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.err -} - type WSEvents struct { cmn.BaseService cdc *amino.Codec @@ -279,8 +265,8 @@ type WSEvents struct { ws *rpcclient.WSClient mtx sync.RWMutex - // query -> subscription - subscriptions map[string]*subscription + // query -> chan + subscriptions map[string]chan ctypes.ResultEvent } func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents { @@ -288,16 +274,18 @@ func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents { cdc: cdc, endpoint: endpoint, remote: remote, - subscriptions: make(map[string]*subscription), + subscriptions: make(map[string]chan ctypes.ResultEvent), } wsEvents.BaseService = *cmn.NewBaseService(nil, "WSEvents", wsEvents) return wsEvents } +// OnStart implements cmn.Service 
by starting WSClient and event loop. func (w *WSEvents) OnStart() error { w.ws = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { - w.redoSubscriptions() + // resubscribe immediately + w.redoSubscriptionsAfter(0 * time.Second) })) w.ws.SetCodec(w.cdc) @@ -310,75 +298,63 @@ func (w *WSEvents) OnStart() error { return nil } -// Stop wraps the BaseService/eventSwitch actions as Start does +// OnStop implements cmn.Service by stopping WSClient. func (w *WSEvents) OnStop() { - err := w.ws.Stop() - if err != nil { - w.Logger.Error("failed to stop WSClient", "err", err) - } + _ = w.ws.Stop() } -func (w *WSEvents) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (types.Subscription, error) { - q := query.String() +// Subscribe implements EventsClient by using WSClient to subscribe given +// subscriber to query. By default, returns a channel with cap=1. Error is +// returned if it fails to subscribe. +// Channel is never closed to prevent clients from seeing an erroneus event. +func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, + outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { - err := w.ws.Subscribe(ctx, q) - if err != nil { + if err := w.ws.Subscribe(ctx, query); err != nil { return nil, err } outCap := 1 - if len(outCapacity) > 0 && outCapacity[0] >= 0 { + if len(outCapacity) > 0 { outCap = outCapacity[0] } + outc := make(chan ctypes.ResultEvent, outCap) w.mtx.Lock() // subscriber param is ignored because Tendermint will override it with // remote IP anyway. 
- w.subscriptions[q] = &subscription{ - out: make(chan tmpubsub.Message, outCap), - cancelled: make(chan struct{}), - } + w.subscriptions[query] = outc w.mtx.Unlock() - return w.subscriptions[q], nil + return outc, nil } -func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - q := query.String() - - err := w.ws.Unsubscribe(ctx, q) - if err != nil { +// Unsubscribe implements EventsClient by using WSClient to unsubscribe given +// subscriber from query. +func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { + if err := w.ws.Unsubscribe(ctx, query); err != nil { return err } w.mtx.Lock() - sub, ok := w.subscriptions[q] + _, ok := w.subscriptions[query] if ok { - close(sub.cancelled) - sub.mtx.Lock() - sub.err = errors.New("unsubscribed") - sub.mtx.Unlock() - delete(w.subscriptions, q) + delete(w.subscriptions, query) } w.mtx.Unlock() return nil } +// UnsubscribeAll implements EventsClient by using WSClient to unsubscribe +// given subscriber from all the queries. func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { - err := w.ws.UnsubscribeAll(ctx) - if err != nil { + if err := w.ws.UnsubscribeAll(ctx); err != nil { return err } w.mtx.Lock() - for _, sub := range w.subscriptions { - close(sub.cancelled) - sub.mtx.Lock() - sub.err = errors.New("unsubscribed") - sub.mtx.Unlock() - } - w.subscriptions = make(map[string]*subscription) + w.subscriptions = make(map[string]chan ctypes.ResultEvent) w.mtx.Unlock() return nil @@ -386,18 +362,21 @@ func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error // After being reconnected, it is necessary to redo subscription to server // otherwise no data will be automatically received. 
-func (w *WSEvents) redoSubscriptions() { +func (w *WSEvents) redoSubscriptionsAfter(d time.Duration) { + time.Sleep(d) + for q := range w.subscriptions { - // NOTE: no timeout for resubscribing - // FIXME: better logging/handling of errors?? - w.ws.Subscribe(context.Background(), q) + err := w.ws.Subscribe(context.Background(), q) + if err != nil { + w.Logger.Error("Failed to resubscribe", "err", err) + } } } -// eventListener is an infinite loop pulling all websocket events -// and pushing them to the EventSwitch. -// -// the goroutine only stops by closing quit +func isErrAlreadySubscribed(err error) bool { + return strings.Contains(err.Error(), tmpubsub.ErrAlreadySubscribed.Error()) +} + func (w *WSEvents) eventListener() { for { select { @@ -405,21 +384,39 @@ func (w *WSEvents) eventListener() { if !ok { return } + if resp.Error != nil { w.Logger.Error("WS error", "err", resp.Error.Error()) + // Error can be ErrAlreadySubscribed or max client (subscriptions per + // client) reached or Tendermint exited. + // We can ignore ErrAlreadySubscribed, but need to retry in other + // cases. + if !isErrAlreadySubscribed(resp.Error) { + // Resubscribe after 1 second to give Tendermint time to restart (if + // crashed). + w.redoSubscriptionsAfter(1 * time.Second) + } continue } + result := new(ctypes.ResultEvent) err := w.cdc.UnmarshalJSON(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) continue } - // NOTE: writing also happens inside mutex so we can't close a channel in - // Unsubscribe/UnsubscribeAll. 
+ w.mtx.RLock() - if sub, ok := w.subscriptions[result.Query]; ok { - sub.out <- tmpubsub.NewMessage(result.Data, result.Tags) + if out, ok := w.subscriptions[result.Query]; ok { + if cap(out) == 0 { + out <- *result + } else { + select { + case out <- *result: + default: + w.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) + } + } } w.mtx.RUnlock() case <-w.Quit(): diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 7477225e..605d84ba 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -21,6 +21,8 @@ implementation. */ import ( + "context" + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" @@ -91,7 +93,18 @@ type NetworkClient interface { // EventsClient is reactive, you can subscribe to any message, given the proper // string. see tendermint/types/events.go type EventsClient interface { - types.EventBusSubscriber + // Subscribe subscribes given subscriber to query. Returns a channel with + // cap=1 onto which events are published. An error is returned if it fails to + // subscribe. outCapacity can be used optionally to set capacity for the + // channel. Channel is never closed to prevent accidental reads. + // + // ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe + // or UnsubscribeAll. + Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) + // Unsubscribe unsubscribes given subscriber from query. + Unsubscribe(ctx context.Context, subscriber, query string) error + // UnsubscribeAll unsubscribes given subscriber from all the queries. + UnsubscribeAll(ctx context.Context, subscriber string) error } // MempoolClient shows us data about current mempool state. 
diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index 33a1ce22..976c9892 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -2,12 +2,18 @@ package client import ( "context" + "time" + + "github.com/pkg/errors" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + tmquery "github.com/tendermint/tendermint/libs/pubsub/query" nm "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/types" ) @@ -24,9 +30,17 @@ are compiled in process. For real clients, you probably want to use client.HTTP. For more powerful control during testing, you probably want the "client/mock" package. + +You can subscribe for any event published by Tendermint using Subscribe method. +Note delivery is best-effort. If you don't read events fast enough, Tendermint +might cancel the subscription. The client will attempt to resubscribe (you +don't need to do anything). It will keep trying indefinitely with exponential +backoff (10ms -> 20ms -> 40ms) until successful. */ type Local struct { *types.EventBus + Logger log.Logger + ctx *rpctypes.Context } // NewLocal configures a client that calls the Node directly. @@ -39,113 +53,189 @@ func NewLocal(node *nm.Node) *Local { node.ConfigureRPC() return &Local{ EventBus: node.EventBus(), + Logger: log.NewNopLogger(), + ctx: &rpctypes.Context{}, } } var ( _ Client = (*Local)(nil) - _ NetworkClient = Local{} + _ NetworkClient = (*Local)(nil) _ EventsClient = (*Local)(nil) ) -func (Local) Status() (*ctypes.ResultStatus, error) { - return core.Status() +// SetLogger allows to set a logger on the client. 
+func (c *Local) SetLogger(l log.Logger) { + c.Logger = l } -func (Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo() +func (c *Local) Status() (*ctypes.ResultStatus, error) { + return core.Status(c.ctx) +} + +func (c *Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { + return core.ABCIInfo(c.ctx) } func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) } -func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Prove) +func (c *Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(tx) +func (c *Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return core.BroadcastTxCommit(c.ctx, tx) } -func (Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(tx) +func (c *Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return core.BroadcastTxAsync(c.ctx, tx) } -func (Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(tx) +func (c *Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return core.BroadcastTxSync(c.ctx, tx) } -func (Local) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { - return core.UnconfirmedTxs(limit) +func (c *Local) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { + return core.UnconfirmedTxs(c.ctx, limit) } -func (Local) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { - return core.NumUnconfirmedTxs() +func (c *Local) 
NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { + return core.NumUnconfirmedTxs(c.ctx) } -func (Local) NetInfo() (*ctypes.ResultNetInfo, error) { - return core.NetInfo() +func (c *Local) NetInfo() (*ctypes.ResultNetInfo, error) { + return core.NetInfo(c.ctx) } -func (Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - return core.DumpConsensusState() +func (c *Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + return core.DumpConsensusState(c.ctx) } -func (Local) ConsensusState() (*ctypes.ResultConsensusState, error) { - return core.ConsensusState() +func (c *Local) ConsensusState() (*ctypes.ResultConsensusState, error) { + return core.ConsensusState(c.ctx) } -func (Local) Health() (*ctypes.ResultHealth, error) { - return core.Health() +func (c *Local) Health() (*ctypes.ResultHealth, error) { + return core.Health(c.ctx) } -func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(seeds) +func (c *Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { + return core.UnsafeDialSeeds(c.ctx, seeds) } -func (Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(peers, persistent) +func (c *Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { + return core.UnsafeDialPeers(c.ctx, peers, persistent) } -func (Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(minHeight, maxHeight) +func (c *Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return core.BlockchainInfo(c.ctx, minHeight, maxHeight) } -func (Local) Genesis() (*ctypes.ResultGenesis, error) { - return core.Genesis() +func (c *Local) Genesis() (*ctypes.ResultGenesis, error) { + return core.Genesis(c.ctx) } -func (Local) Block(height *int64) (*ctypes.ResultBlock, error) { - return core.Block(height) 
+func (c *Local) Block(height *int64) (*ctypes.ResultBlock, error) { + return core.Block(c.ctx, height) } -func (Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { - return core.BlockResults(height) +func (c *Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { + return core.BlockResults(c.ctx, height) } -func (Local) Commit(height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(height) +func (c *Local) Commit(height *int64) (*ctypes.ResultCommit, error) { + return core.Commit(c.ctx, height) } -func (Local) Validators(height *int64) (*ctypes.ResultValidators, error) { - return core.Validators(height) +func (c *Local) Validators(height *int64) (*ctypes.ResultValidators, error) { + return core.Validators(c.ctx, height) } -func (Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { - return core.Tx(hash, prove) +func (c *Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { + return core.Tx(c.ctx, hash, prove) } -func (Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { - return core.TxSearch(query, prove, page, perPage) +func (c *Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { + return core.TxSearch(c.ctx, query, prove, page, perPage) } -func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (types.Subscription, error) { - return c.EventBus.Subscribe(ctx, subscriber, query, outCapacity...) 
+func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + q, err := tmquery.New(query) + if err != nil { + return nil, errors.Wrap(err, "failed to parse query") + } + sub, err := c.EventBus.Subscribe(ctx, subscriber, q) + if err != nil { + return nil, errors.Wrap(err, "failed to subscribe") + } + + outCap := 1 + if len(outCapacity) > 0 { + outCap = outCapacity[0] + } + + outc := make(chan ctypes.ResultEvent, outCap) + go c.eventsRoutine(sub, subscriber, q, outc) + + return outc, nil } -func (c *Local) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - return c.EventBus.Unsubscribe(ctx, subscriber, query) +func (c *Local) eventsRoutine(sub types.Subscription, subscriber string, q tmpubsub.Query, outc chan<- ctypes.ResultEvent) { + for { + select { + case msg := <-sub.Out(): + result := ctypes.ResultEvent{Query: q.String(), Data: msg.Data(), Tags: msg.Tags()} + if cap(outc) == 0 { + outc <- result + } else { + select { + case outc <- result: + default: + c.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) + } + } + case <-sub.Cancelled(): + if sub.Err() == tmpubsub.ErrUnsubscribed { + return + } + + c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String()) + sub = c.resubscribe(subscriber, q) + if sub == nil { // client was stopped + return + } + case <-c.Quit(): + return + } + } +} + +// Try to resubscribe with exponential backoff. 
+func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription { + attempts := 0 + for { + if !c.IsRunning() { + return nil + } + + sub, err := c.EventBus.Subscribe(context.Background(), subscriber, q) + if err == nil { + return sub + } + + attempts++ + time.Sleep((10 << uint(attempts)) * time.Millisecond) // 10ms -> 20ms -> 40ms + } +} + +func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error { + q, err := tmquery.New(query) + if err != nil { + return errors.Wrap(err, "failed to parse query") + } + return c.EventBus.Unsubscribe(ctx, subscriber, q) } func (c *Local) UnsubscribeAll(ctx context.Context, subscriber string) error { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index ef2d4f19..9c0eb75b 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -21,6 +21,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/types" ) @@ -76,11 +77,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { } func (c Client) Status() (*ctypes.ResultStatus, error) { - return core.Status() + return core.Status(&rpctypes.Context{}) } func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo() + return core.ABCIInfo(&rpctypes.Context{}) } func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { @@ -88,49 +89,49 @@ func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Prove) + return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } func (c Client) BroadcastTxCommit(tx types.Tx) 
(*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(tx) + return core.BroadcastTxCommit(&rpctypes.Context{}, tx) } func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(tx) + return core.BroadcastTxAsync(&rpctypes.Context{}, tx) } func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(tx) + return core.BroadcastTxSync(&rpctypes.Context{}, tx) } func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { - return core.NetInfo() + return core.NetInfo(&rpctypes.Context{}) } func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(seeds) + return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds) } func (c Client) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(peers, persistent) + return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent) } func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(minHeight, maxHeight) + return core.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } func (c Client) Genesis() (*ctypes.ResultGenesis, error) { - return core.Genesis() + return core.Genesis(&rpctypes.Context{}) } func (c Client) Block(height *int64) (*ctypes.ResultBlock, error) { - return core.Block(height) + return core.Block(&rpctypes.Context{}, height) } func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(height) + return core.Commit(&rpctypes.Context{}, height) } func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) { - return core.Validators(height) + return core.Validators(&rpctypes.Context{}, height) } diff --git a/rpc/core/abci.go b/rpc/core/abci.go index aa6089b6..ce15ac14 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -5,6 +5,7 @@ import ( cmn 
"github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) // Query the application for some information. @@ -52,7 +53,7 @@ import ( // | data | []byte | false | true | Data | // | height | int64 | 0 | false | Height (0 means latest) | // | prove | bool | false | false | Includes proof if true | -func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { +func ABCIQuery(ctx *rpctypes.Context, path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ Path: path, Data: data, @@ -96,7 +97,7 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctype // "jsonrpc": "2.0" // } // ``` -func ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := proxyAppQuery.InfoSync(proxy.RequestInfo) if err != nil { return nil, err diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 906aea7b..40b6811d 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -5,6 +5,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -68,7 +69,7 @@ import ( // ``` // // -func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { // maximum 20 block metas const limit int64 = 20 @@ -226,7 +227,7 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "jsonrpc": "2.0" // } // ``` -func Block(heightPtr *int64) (*ctypes.ResultBlock, 
error) { +func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { storeHeight := blockStore.Height() height, err := getHeight(storeHeight, heightPtr) if err != nil { @@ -313,7 +314,7 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { // "jsonrpc": "2.0" // } // ``` -func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { +func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { storeHeight := blockStore.Height() height, err := getHeight(storeHeight, heightPtr) if err != nil { @@ -372,7 +373,7 @@ func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { // ] // } // ``` -func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { +func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { storeHeight := blockStore.Height() height, err := getHeight(storeHeight, heightPtr) if err != nil { diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 81694b7e..b8a91f10 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -3,6 +3,7 @@ package core import ( cm "github.com/tendermint/tendermint/consensus" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -47,7 +48,7 @@ import ( // "jsonrpc": "2.0" // } // ``` -func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { +func Validators(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultValidators, error) { // The latest validator that we know is the // NextValidator of the last block. 
height := consensusState.GetState().LastBlockHeight + 1 @@ -200,7 +201,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // } // } // ``` -func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. peers := p2pPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) @@ -277,7 +278,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { // } //} //``` -func ConsensusState() (*ctypes.ResultConsensusState, error) { +func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { // Get self round state. bz, err := consensusState.GetRoundStateSimpleJSON() return &ctypes.ResultConsensusState{RoundState: bz}, err @@ -320,7 +321,7 @@ func ConsensusState() (*ctypes.ResultConsensusState, error) { // } // } // ``` -func ConsensusParams(heightPtr *int64) (*ctypes.ResultConsensusParams, error) { +func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { height := consensusState.GetState().LastBlockHeight + 1 height, err := getHeight(height, heightPtr) if err != nil { diff --git a/rpc/core/dev.go b/rpc/core/dev.go index 0b515476..71f284f8 100644 --- a/rpc/core/dev.go +++ b/rpc/core/dev.go @@ -5,16 +5,19 @@ import ( "runtime/pprof" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) -func UnsafeFlushMempool() (*ctypes.ResultUnsafeFlushMempool, error) { +// UnsafeFlushMempool removes all transactions from the mempool. +func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { mempool.Flush() return &ctypes.ResultUnsafeFlushMempool{}, nil } var profFile *os.File -func UnsafeStartCPUProfiler(filename string) (*ctypes.ResultUnsafeProfile, error) { +// UnsafeStartCPUProfiler starts a pprof profiler using the given filename. 
+func UnsafeStartCPUProfiler(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { var err error profFile, err = os.Create(filename) if err != nil { @@ -27,7 +30,8 @@ func UnsafeStartCPUProfiler(filename string) (*ctypes.ResultUnsafeProfile, error return &ctypes.ResultUnsafeProfile{}, nil } -func UnsafeStopCPUProfiler() (*ctypes.ResultUnsafeProfile, error) { +// UnsafeStopCPUProfiler stops the running pprof profiler. +func UnsafeStopCPUProfiler(ctx *rpctypes.Context) (*ctypes.ResultUnsafeProfile, error) { pprof.StopCPUProfile() if err := profFile.Close(); err != nil { return nil, err @@ -35,7 +39,8 @@ func UnsafeStopCPUProfiler() (*ctypes.ResultUnsafeProfile, error) { return &ctypes.ResultUnsafeProfile{}, nil } -func UnsafeWriteHeapProfile(filename string) (*ctypes.ResultUnsafeProfile, error) { +// UnsafeWriteHeapProfile dumps a heap profile to the given filename. +func UnsafeWriteHeapProfile(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { memProfFile, err := os.Create(filename) if err != nil { return nil, err diff --git a/rpc/core/events.go b/rpc/core/events.go index 22c7ea78..3ea33fa8 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -6,10 +6,10 @@ import ( "github.com/pkg/errors" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" - tmtypes "github.com/tendermint/tendermint/types" ) // Subscribe for events via WebSocket. 
@@ -90,8 +90,15 @@ import ( // | query | string | "" | true | Query | // // -func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscribe, error) { - addr := wsCtx.GetRemoteAddr() +func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { + addr := ctx.RemoteAddr() + + if eventBus.NumClients() >= config.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", config.MaxSubscriptionClients) + } else if eventBus.NumClientSubscriptions(addr) >= config.MaxSubscriptionsPerClient { + return nil, fmt.Errorf("max_subscriptions_per_client %d reached", config.MaxSubscriptionsPerClient) + } + logger.Info("Subscribe to query", "remote", addr, "query", query) q, err := tmquery.New(query) @@ -99,9 +106,9 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri return nil, errors.Wrap(err, "failed to parse query") } - ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + subCtx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) defer cancel() - sub, err := eventBusFor(wsCtx).Subscribe(ctx, addr, q) + sub, err := eventBus.Subscribe(subCtx, addr, q) if err != nil { return nil, err } @@ -111,18 +118,26 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri select { case msg := <-sub.Out(): resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Tags: msg.Tags()} - wsCtx.TryWriteRPCResponse( + ctx.WSConn.TryWriteRPCResponse( rpctypes.NewRPCSuccessResponse( - wsCtx.Codec(), - rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", wsCtx.Request.ID)), + ctx.WSConn.Codec(), + rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)), resultEvent, )) case <-sub.Cancelled(): - wsCtx.TryWriteRPCResponse( - rpctypes.RPCServerError(rpctypes.JSONRPCStringID( - fmt.Sprintf("%v#event", wsCtx.Request.ID)), - fmt.Errorf("subscription was cancelled (reason: %v)", sub.Err()), - )) + if sub.Err() != 
tmpubsub.ErrUnsubscribed { + var reason string + if sub.Err() == nil { + reason = "Tendermint exited" + } else { + reason = sub.Err().Error() + } + ctx.WSConn.TryWriteRPCResponse( + rpctypes.RPCServerError(rpctypes.JSONRPCStringID( + fmt.Sprintf("%v#event", ctx.JSONReq.ID)), + fmt.Errorf("subscription was cancelled (reason: %s)", reason), + )) + } return } } @@ -161,14 +176,14 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri // | query | string | "" | true | Query | // // -func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsubscribe, error) { - addr := wsCtx.GetRemoteAddr() +func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() logger.Info("Unsubscribe from query", "remote", addr, "query", query) q, err := tmquery.New(query) if err != nil { return nil, errors.Wrap(err, "failed to parse query") } - err = eventBusFor(wsCtx).Unsubscribe(context.Background(), addr, q) + err = eventBus.Unsubscribe(context.Background(), addr, q) if err != nil { return nil, err } @@ -199,20 +214,12 @@ func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsub // ``` // // -func UnsubscribeAll(wsCtx rpctypes.WSRPCContext) (*ctypes.ResultUnsubscribe, error) { - addr := wsCtx.GetRemoteAddr() +func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() logger.Info("Unsubscribe from all", "remote", addr) - err := eventBusFor(wsCtx).UnsubscribeAll(context.Background(), addr) + err := eventBus.UnsubscribeAll(context.Background(), addr) if err != nil { return nil, err } return &ctypes.ResultUnsubscribe{}, nil } - -func eventBusFor(wsCtx rpctypes.WSRPCContext) tmtypes.EventBusSubscriber { - es := wsCtx.GetEventSubscriber() - if es == nil { - es = eventBus - } - return es -} diff --git a/rpc/core/health.go b/rpc/core/health.go index eeb8686b..41186a04 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go 
@@ -2,6 +2,7 @@ package core import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) // Get node health. Returns empty result (200 OK) on success, no response - in @@ -31,6 +32,6 @@ import ( // "jsonrpc": "2.0" // } // ``` -func Health() (*ctypes.ResultHealth, error) { +func Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 42aa56af..6ebdbcfc 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -9,7 +9,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/types" ) @@ -59,7 +59,7 @@ import ( // | Parameter | Type | Default | Required | Description | // |-----------+------+---------+----------+-----------------| // | tx | Tx | nil | true | The transaction | -func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { err := mempool.CheckTx(tx, nil) if err != nil { return nil, err @@ -108,7 +108,7 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // | Parameter | Type | Default | Required | Description | // |-----------+------+---------+----------+-----------------| // | tx | Tx | nil | true | The transaction | -func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := mempool.CheckTx(tx, func(res *abci.Response) { resCh <- res @@ -128,6 +128,11 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // Returns with the responses from CheckTx and DeliverTx. 
// +// IMPORTANT: use only for testing and development. In production, use +// BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction +// result using JSONRPC via a websocket. See +// https://tendermint.com/docs/app-dev/subscribing-to-events-via-websocket.html +// // CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout // waiting for tx to commit. // @@ -182,18 +187,26 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // | Parameter | Type | Default | Required | Description | // |-----------+------+---------+----------+-----------------| // | tx | Tx | nil | true | The transaction | -func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + subscriber := ctx.RemoteAddr() + + if eventBus.NumClients() >= config.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", config.MaxSubscriptionClients) + } else if eventBus.NumClientSubscriptions(subscriber) >= config.MaxSubscriptionsPerClient { + return nil, fmt.Errorf("max_subscriptions_per_client %d reached", config.MaxSubscriptionsPerClient) + } + // Subscribe to tx being committed in block. 
- ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + subCtx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) defer cancel() q := types.EventQueryTxFor(tx) - deliverTxSub, err := eventBus.Subscribe(ctx, "mempool", q) + deliverTxSub, err := eventBus.Subscribe(subCtx, subscriber, q) if err != nil { err = errors.Wrap(err, "failed to subscribe to tx") logger.Error("Error on broadcast_tx_commit", "err", err) return nil, err } - defer eventBus.Unsubscribe(context.Background(), "mempool", q) + defer eventBus.Unsubscribe(context.Background(), subscriber, q) // Broadcast tx and wait for CheckTx result checkTxResCh := make(chan *abci.Response, 1) @@ -215,8 +228,6 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { } // Wait for the tx to be included in a block or timeout. - // TODO: configurable? - var deliverTxTimeout = rpcserver.WriteTimeout / 2 select { case msg := <-deliverTxSub.Out(): // The tx was included in a block. deliverTxRes := msg.Data().(types.EventDataTx) @@ -227,14 +238,20 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { Height: deliverTxRes.Height, }, nil case <-deliverTxSub.Cancelled(): - err = errors.New("deliverTxSub was cancelled. 
Did the Tendermint stop?") + var reason string + if deliverTxSub.Err() == nil { + reason = "Tendermint exited" + } else { + reason = deliverTxSub.Err().Error() + } + err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) logger.Error("Error on broadcastTxCommit", "err", err) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: abci.ResponseDeliverTx{}, Hash: tx.Hash(), }, err - case <-time.After(deliverTxTimeout): + case <-time.After(config.TimeoutBroadcastTxCommit): err = errors.New("Timed out waiting for tx to be included in a block") logger.Error("Error on broadcastTxCommit", "err", err) return &ctypes.ResultBroadcastTxCommit{ @@ -281,7 +298,8 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { // | Parameter | Type | Default | Required | Description | // |-----------+------+---------+----------+--------------------------------------| // | limit | int | 30 | false | Maximum number of entries (max: 100) | -func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { +// ``` +func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator limit = validatePerPage(limit) @@ -323,7 +341,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { // } // } // ``` -func NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { +func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: mempool.Size(), Total: mempool.Size(), diff --git a/rpc/core/net.go b/rpc/core/net.go index e920ea7c..23bc40e8 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) // Get network info. @@ -153,7 +154,7 @@ import ( // ... 
// } // ``` -func NetInfo() (*ctypes.ResultNetInfo, error) { +func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { out, in, _ := p2pPeers.NumPeers() peers := make([]ctypes.Peer, 0, out+in) for _, peer := range p2pPeers.Peers().List() { @@ -179,7 +180,7 @@ func NetInfo() (*ctypes.ResultNetInfo, error) { }, nil } -func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { +func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided") } @@ -192,7 +193,7 @@ func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil } -func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { +func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("No peers provided") } @@ -247,6 +248,6 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, // "jsonrpc": "2.0" // } // ``` -func Genesis() (*ctypes.ResultGenesis, error) { +func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { return &ctypes.ResultGenesis{Genesis: genDoc}, nil } diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 23649544..0b760344 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -1,6 +1,7 @@ package core import ( + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" dbm "github.com/tendermint/tendermint/libs/db" @@ -71,6 +72,8 @@ var ( mempool *mempl.Mempool logger log.Logger + + config cfg.RPCConfig ) func SetStateDB(db dbm.DB) { @@ -133,6 +136,11 @@ func SetEventBus(b *types.EventBus) { eventBus = b } +// SetConfig sets an RPCConfig. 
+func SetConfig(c cfg.RPCConfig) { + config = c +} + func validatePage(page, perPage, totalCount int) int { if perPage < 1 { return 1 diff --git a/rpc/core/status.go b/rpc/core/status.go index ae22ecd3..aab86466 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -7,6 +7,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -70,7 +71,7 @@ import ( // } // } // ``` -func Status() (*ctypes.ResultStatus, error) { +func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { var latestHeight int64 if consensusReactor.FastSync() { latestHeight = blockStore.Height() diff --git a/rpc/core/tx.go b/rpc/core/tx.go index aa439218..575553f8 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -7,6 +7,7 @@ import ( tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/types" ) @@ -77,7 +78,7 @@ import ( // - `index`: `int` - index of the transaction // - `height`: `int` - height of the block where this transaction was in // - `hash`: `[]byte` - hash of the transaction -func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { +func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { // if index is disabled, return error if _, ok := txIndexer.(*null.TxIndex); ok { @@ -183,7 +184,7 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { // - `index`: `int` - index of the transaction // - `height`: `int` - height of the block where this transaction was in // - `hash`: `[]byte` - hash of the transaction -func TxSearch(query string, prove bool, page, perPage int) 
(*ctypes.ResultTxSearch, error) { +func TxSearch(ctx *rpctypes.Context, query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { // if index is disabled, return error if _, ok := txIndexer.(*null.TxIndex); ok { return nil, fmt.Errorf("Transaction indexing is disabled") diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 0b840e3e..741d63af 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -5,6 +5,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" core "github.com/tendermint/tendermint/rpc/core" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) type broadcastAPI struct { @@ -16,12 +17,14 @@ func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*Response } func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - res, err := core.BroadcastTxCommit(req.Tx) + // NOTE: there's no way to get client's remote address + // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go + res, err := core.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) if err != nil { return nil, err } - return &ResponseBroadcastTx{ + return &ResponseBroadcastTx{ CheckTx: &abci.ResponseCheckTx{ Code: res.CheckTx.Code, Data: res.CheckTx.Data, diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 2bc89864..922016dd 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -14,7 +14,8 @@ type Config struct { MaxOpenConnections int } -// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given net.Listener. +// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given +// net.Listener. // NOTE: This function blocks - you may want to call it in a go-routine. 
func StartGRPCServer(ln net.Listener) error { grpcServer := grpc.NewServer() diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go index b82e5222..1e1f2540 100644 --- a/rpc/grpc/grpc_test.go +++ b/rpc/grpc/grpc_test.go @@ -25,9 +25,8 @@ func TestMain(m *testing.M) { } func TestBroadcastTx(t *testing.T) { - require := require.New(t) res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}) - require.Nil(err, "%+v", err) - require.EqualValues(0, res.CheckTx.Code) - require.EqualValues(0, res.DeliverTx.Code) + require.NoError(t, err) + require.EqualValues(t, 0, res.CheckTx.Code) + require.EqualValues(t, 0, res.DeliverTx.Code) } diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 794ab462..68c134a7 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -63,23 +63,23 @@ var Routes = map[string]*server.RPCFunc{ // Amino codec required to encode/decode everything above. var RoutesCdc = amino.NewCodec() -func EchoResult(v string) (*ResultEcho, error) { +func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(wsCtx types.WSRPCContext, v string) (*ResultEcho, error) { +func EchoWSResult(ctx *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(v int) (*ResultEchoInt, error) { +func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(v cmn.HexBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(ctx *types.Context, v cmn.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 80eb4308..36ea47da 100644 --- 
a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -2,7 +2,6 @@ package rpcserver import ( "bytes" - "context" "encoding/hex" "encoding/json" "fmt" @@ -129,20 +128,26 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(request.ID, errors.Errorf("Path %s is invalid", r.URL.Path))) return } + rpcFunc := funcMap[request.Method] if rpcFunc == nil || rpcFunc.ws { WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(request.ID)) return } - var args []reflect.Value + + ctx := &types.Context{JSONReq: &request, HTTPReq: r} + args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { - args, err = jsonParamsToArgsRPC(rpcFunc, cdc, request.Params) + fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) if err != nil { WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) return } + args = append(args, fnArgs...) } + returns := rpcFunc.f.Call(args) + logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) result, err := unreflectResult(returns) if err != nil { @@ -205,13 +210,14 @@ func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMess return values, nil } -// `raw` is unparsed json (from json.RawMessage) encoding either a map or an array. -// `argsOffset` should be 0 for RPC calls, and 1 for WS requests, where len(rpcFunc.args) != len(rpcFunc.argNames). +// raw is unparsed json (from json.RawMessage) encoding either a map or an +// array. 
// // Example: -// rpcFunc.args = [rpctypes.WSRPCContext string] +// rpcFunc.args = [rpctypes.Context string] // rpcFunc.argNames = ["arg"] -func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte, argsOffset int) ([]reflect.Value, error) { +func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte) ([]reflect.Value, error) { + const argsOffset = 1 // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? // First, try to get the map. @@ -232,20 +238,6 @@ func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte, argsOffset return nil, errors.Errorf("Unknown type for JSON params: %v. Expected map or array", err) } -// Convert a []interface{} OR a map[string]interface{} to properly typed values -func jsonParamsToArgsRPC(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage) ([]reflect.Value, error) { - return jsonParamsToArgs(rpcFunc, cdc, params, 0) -} - -// Same as above, but with the first param the websocket connection -func jsonParamsToArgsWS(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage, wsCtx types.WSRPCContext) ([]reflect.Value, error) { - values, err := jsonParamsToArgs(rpcFunc, cdc, params, 1) - if err != nil { - return nil, err - } - return append([]reflect.Value{reflect.ValueOf(wsCtx)}, values...), nil -} - // rpc.json //----------------------------------------------------------------------------- // rpc.http @@ -258,15 +250,23 @@ func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(types.JSONRPCStringID(""))) } } + // All other endpoints return func(w http.ResponseWriter, r *http.Request) { logger.Debug("HTTP HANDLER", "req", r) - args, err := httpParamsToArgs(rpcFunc, cdc, r) + + ctx := &types.Context{HTTPReq: r} + args := []reflect.Value{reflect.ValueOf(ctx)} + + fnArgs, err := httpParamsToArgs(rpcFunc, cdc, r) if err != nil { WriteRPCResponseHTTP(w, 
types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "Error converting http params to arguments"))) return } + args = append(args, fnArgs...) + returns := rpcFunc.f.Call(args) + logger.Info("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) result, err := unreflectResult(returns) if err != nil { @@ -280,10 +280,13 @@ func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func // Covert an http query to a list of properly typed values. // To be properly decoded the arg must be a concrete type from tendermint (if its an interface). func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]reflect.Value, error) { - values := make([]reflect.Value, len(rpcFunc.args)) + // skip types.Context + const argsOffset = 1 + + values := make([]reflect.Value, len(rpcFunc.argNames)) for i, name := range rpcFunc.argNames { - argType := rpcFunc.args[i] + argType := rpcFunc.args[i+argsOffset] values[i] = reflect.Zero(argType) // set default for that type @@ -434,8 +437,8 @@ type wsConnection struct { // Send pings to server with this period. Must be less than readWait, but greater than zero. pingPeriod time.Duration - // object that is used to subscribe / unsubscribe from events - eventSub types.EventSubscriber + // callback which is called upon disconnect + onDisconnect func(remoteAddr string) } // NewWSConnection wraps websocket.Conn. @@ -468,12 +471,11 @@ func NewWSConnection( return wsc } -// EventSubscriber sets object that is used to subscribe / unsubscribe from -// events - not Goroutine-safe. If none given, default node's eventBus will be -// used. -func EventSubscriber(eventSub types.EventSubscriber) func(*wsConnection) { +// OnDisconnect sets a callback which is used upon disconnect - not +// Goroutine-safe. Nop by default. 
+func OnDisconnect(onDisconnect func(remoteAddr string)) func(*wsConnection) { return func(wsc *wsConnection) { - wsc.eventSub = eventSub + wsc.onDisconnect = onDisconnect } } @@ -527,8 +529,8 @@ func (wsc *wsConnection) OnStop() { // Both read and write loops close the websocket connection when they exit their loops. // The writeChan is never closed, to allow WriteRPCResponse() to fail. - if wsc.eventSub != nil { - wsc.eventSub.UnsubscribeAll(context.TODO(), wsc.remoteAddr) + if wsc.onDisconnect != nil { + wsc.onDisconnect(wsc.remoteAddr) } } @@ -538,11 +540,6 @@ func (wsc *wsConnection) GetRemoteAddr() string { return wsc.remoteAddr } -// GetEventSubscriber implements WSRPCConnection by returning event subscriber. -func (wsc *wsConnection) GetEventSubscriber() types.EventSubscriber { - return wsc.eventSub -} - // WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. // It implements WSRPCConnection. It is Goroutine-safe. func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { @@ -628,27 +625,23 @@ func (wsc *wsConnection) readRoutine() { } // Now, fetch the RPCFunc and execute it. 
- rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { wsc.WriteRPCResponse(types.RPCMethodNotFoundError(request.ID)) continue } - var args []reflect.Value - if rpcFunc.ws { - wsCtx := types.WSRPCContext{Request: request, WSRPCConnection: wsc} - if len(request.Params) > 0 { - args, err = jsonParamsToArgsWS(rpcFunc, wsc.cdc, request.Params, wsCtx) - } - } else { - if len(request.Params) > 0 { - args, err = jsonParamsToArgsRPC(rpcFunc, wsc.cdc, request.Params) + + ctx := &types.Context{JSONReq: &request, WSConn: wsc} + args := []reflect.Value{reflect.ValueOf(ctx)} + if len(request.Params) > 0 { + fnArgs, err := jsonParamsToArgs(rpcFunc, wsc.cdc, request.Params) + if err != nil { + wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) + continue } + args = append(args, fnArgs...) } - if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) - continue - } + returns := rpcFunc.f.Call(args) // TODO: Need to encode args/returns to string if we want to log them diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index b1d3c788..f8ad0610 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -28,7 +28,7 @@ import ( func testMux() *http.ServeMux { funcMap := map[string]*rs.RPCFunc{ - "c": rs.NewRPCFunc(func(s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": rs.NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), } cdc := amino.NewCodec() mux := http.NewServeMux() @@ -195,7 +195,7 @@ func TestWebsocketManagerHandler(t *testing.T) { func newWSServer() *httptest.Server { funcMap := map[string]*rs.RPCFunc{ - "c": rs.NewWSRPCFunc(func(wsCtx types.WSRPCContext, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": rs.NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { 
return "foo", nil }, "s,i"), } wm := rs.NewWebsocketManager(funcMap, amino.NewCodec()) wm.SetLogger(log.TestingLogger()) diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go index 7b0aacdb..9196bb71 100644 --- a/rpc/lib/server/parse_test.go +++ b/rpc/lib/server/parse_test.go @@ -10,24 +10,23 @@ import ( "github.com/stretchr/testify/assert" amino "github.com/tendermint/go-amino" cmn "github.com/tendermint/tendermint/libs/common" + types "github.com/tendermint/tendermint/rpc/lib/types" ) func TestParseJSONMap(t *testing.T) { - assert := assert.New(t) - input := []byte(`{"value":"1234","height":22}`) // naive is float,string var p1 map[string]interface{} err := json.Unmarshal(input, &p1) - if assert.Nil(err) { + if assert.Nil(t, err) { h, ok := p1["height"].(float64) - if assert.True(ok, "%#v", p1["height"]) { - assert.EqualValues(22, h) + if assert.True(t, ok, "%#v", p1["height"]) { + assert.EqualValues(t, 22, h) } v, ok := p1["value"].(string) - if assert.True(ok, "%#v", p1["value"]) { - assert.EqualValues("1234", v) + if assert.True(t, ok, "%#v", p1["value"]) { + assert.EqualValues(t, "1234", v) } } @@ -38,14 +37,14 @@ func TestParseJSONMap(t *testing.T) { "height": &tmp, } err = json.Unmarshal(input, &p2) - if assert.Nil(err) { + if assert.Nil(t, err) { h, ok := p2["height"].(float64) - if assert.True(ok, "%#v", p2["height"]) { - assert.EqualValues(22, h) + if assert.True(t, ok, "%#v", p2["height"]) { + assert.EqualValues(t, 22, h) } v, ok := p2["value"].(string) - if assert.True(ok, "%#v", p2["value"]) { - assert.EqualValues("1234", v) + if assert.True(t, ok, "%#v", p2["value"]) { + assert.EqualValues(t, "1234", v) } } @@ -60,14 +59,14 @@ func TestParseJSONMap(t *testing.T) { Value: &cmn.HexBytes{}, } err = json.Unmarshal(input, &p3) - if assert.Nil(err) { + if assert.Nil(t, err) { h, ok := p3.Height.(*int) - if assert.True(ok, "%#v", p3.Height) { - assert.Equal(22, *h) + if assert.True(t, ok, "%#v", p3.Height) { + assert.Equal(t, 22, *h) } v, 
ok := p3.Value.(*cmn.HexBytes) - if assert.True(ok, "%#v", p3.Value) { - assert.EqualValues([]byte{0x12, 0x34}, *v) + if assert.True(t, ok, "%#v", p3.Value) { + assert.EqualValues(t, []byte{0x12, 0x34}, *v) } } @@ -77,46 +76,44 @@ func TestParseJSONMap(t *testing.T) { Height int `json:"height"` }{} err = json.Unmarshal(input, &p4) - if assert.Nil(err) { - assert.EqualValues(22, p4.Height) - assert.EqualValues([]byte{0x12, 0x34}, p4.Value) + if assert.Nil(t, err) { + assert.EqualValues(t, 22, p4.Height) + assert.EqualValues(t, []byte{0x12, 0x34}, p4.Value) } // so, let's use this trick... // dynamic keys on map, and we can deserialize to the desired types var p5 map[string]*json.RawMessage err = json.Unmarshal(input, &p5) - if assert.Nil(err) { + if assert.Nil(t, err) { var h int err = json.Unmarshal(*p5["height"], &h) - if assert.Nil(err) { - assert.Equal(22, h) + if assert.Nil(t, err) { + assert.Equal(t, 22, h) } var v cmn.HexBytes err = json.Unmarshal(*p5["value"], &v) - if assert.Nil(err) { - assert.Equal(cmn.HexBytes{0x12, 0x34}, v) + if assert.Nil(t, err) { + assert.Equal(t, cmn.HexBytes{0x12, 0x34}, v) } } } func TestParseJSONArray(t *testing.T) { - assert := assert.New(t) - input := []byte(`["1234",22]`) // naive is float,string var p1 []interface{} err := json.Unmarshal(input, &p1) - if assert.Nil(err) { + if assert.Nil(t, err) { v, ok := p1[0].(string) - if assert.True(ok, "%#v", p1[0]) { - assert.EqualValues("1234", v) + if assert.True(t, ok, "%#v", p1[0]) { + assert.EqualValues(t, "1234", v) } h, ok := p1[1].(float64) - if assert.True(ok, "%#v", p1[1]) { - assert.EqualValues(22, h) + if assert.True(t, ok, "%#v", p1[1]) { + assert.EqualValues(t, 22, h) } } @@ -124,22 +121,20 @@ func TestParseJSONArray(t *testing.T) { tmp := 0 p2 := []interface{}{&cmn.HexBytes{}, &tmp} err = json.Unmarshal(input, &p2) - if assert.Nil(err) { + if assert.Nil(t, err) { v, ok := p2[0].(*cmn.HexBytes) - if assert.True(ok, "%#v", p2[0]) { - assert.EqualValues([]byte{0x12, 0x34}, 
*v) + if assert.True(t, ok, "%#v", p2[0]) { + assert.EqualValues(t, []byte{0x12, 0x34}, *v) } h, ok := p2[1].(*int) - if assert.True(ok, "%#v", p2[1]) { - assert.EqualValues(22, *h) + if assert.True(t, ok, "%#v", p2[1]) { + assert.EqualValues(t, 22, *h) } } } func TestParseJSONRPC(t *testing.T) { - assert := assert.New(t) - - demo := func(height int, name string) {} + demo := func(ctx *types.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name") cdc := amino.NewCodec() @@ -162,14 +157,14 @@ func TestParseJSONRPC(t *testing.T) { for idx, tc := range cases { i := strconv.Itoa(idx) data := []byte(tc.raw) - vals, err := jsonParamsToArgs(call, cdc, data, 0) + vals, err := jsonParamsToArgs(call, cdc, data) if tc.fail { - assert.NotNil(err, i) + assert.NotNil(t, err, i) } else { - assert.Nil(err, "%s: %+v", i, err) - if assert.Equal(2, len(vals), i) { - assert.Equal(tc.height, vals[0].Int(), i) - assert.Equal(tc.name, vals[1].String(), i) + assert.Nil(t, err, "%s: %+v", i, err) + if assert.Equal(t, 2, len(vals), i) { + assert.Equal(t, tc.height, vals[0].Int(), i) + assert.Equal(t, tc.name, vals[1].String(), i) } } @@ -177,8 +172,7 @@ func TestParseJSONRPC(t *testing.T) { } func TestParseURI(t *testing.T) { - - demo := func(height int, name string) {} + demo := func(ctx *types.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name") cdc := amino.NewCodec() diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go index b2f94580..3afc1ac1 100644 --- a/rpc/lib/test/main.go +++ b/rpc/lib/test/main.go @@ -6,16 +6,18 @@ import ( "os" amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" ) var routes = map[string]*rpcserver.RPCFunc{ "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num"), } -func HelloWorld(name string, num int) 
(Result, error) { +func HelloWorld(ctx *rpctypes.Context, name string, num int) (Result, error) { return Result{fmt.Sprintf("hi %s %d", name, num)}, nil } diff --git a/rpc/lib/types/types.go b/rpc/lib/types/types.go index d4e82b10..21623e41 100644 --- a/rpc/lib/types/types.go +++ b/rpc/lib/types/types.go @@ -1,18 +1,15 @@ package rpctypes import ( - "context" "encoding/json" "fmt" + "net/http" "reflect" "strings" "github.com/pkg/errors" amino "github.com/tendermint/go-amino" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmtypes "github.com/tendermint/tendermint/types" ) // a wrapper to emulate a sum type: jsonrpcid = string | int @@ -236,30 +233,47 @@ func RPCServerError(id jsonrpcid, err error) RPCResponse { //---------------------------------------- -// *wsConnection implements this interface. +// WSRPCConnection represents a websocket connection. type WSRPCConnection interface { + // GetRemoteAddr returns a remote address of the connection. GetRemoteAddr() string + // WriteRPCResponse writes the resp onto connection (BLOCKING). WriteRPCResponse(resp RPCResponse) + // TryWriteRPCResponse tries to write the resp onto connection (NON-BLOCKING). TryWriteRPCResponse(resp RPCResponse) bool - GetEventSubscriber() EventSubscriber + // Codec returns an Amino codec used. Codec() *amino.Codec } -// websocket-only RPCFuncs take this as the first parameter. -type WSRPCContext struct { - Request RPCRequest - WSRPCConnection +// Context is the first parameter for all functions. It carries a json-rpc +// request, http request and websocket connection. +// +// - JSONReq is non-nil when JSONRPC is called over websocket or HTTP. +// - WSConn is non-nil when we're connected via a websocket. +// - HTTPReq is non-nil when URI or JSONRPC is called over HTTP. 
+type Context struct { + // json-rpc request + JSONReq *RPCRequest + // websocket connection + WSConn WSRPCConnection + // http request + HTTPReq *http.Request } -// EventSubscriber mirrors tendermint/tendermint/types.EventBusSubscriber -type EventSubscriber interface { - Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (tmtypes.Subscription, error) - Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error - UnsubscribeAll(ctx context.Context, subscriber string) error +// RemoteAddr returns either HTTPReq#RemoteAddr or result of the +// WSConn#GetRemoteAddr(). +func (ctx *Context) RemoteAddr() string { + if ctx.HTTPReq != nil { + return ctx.HTTPReq.RemoteAddr + } else if ctx.WSConn != nil { + return ctx.WSConn.GetRemoteAddr() + } + return "" } //---------------------------------------- // SOCKETS + // // Determine if its a unix or tcp socket. // If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port diff --git a/types/event_bus.go b/types/event_bus.go index 2aa84a4a..da959090 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -15,6 +15,9 @@ type EventBusSubscriber interface { Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (Subscription, error) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error UnsubscribeAll(ctx context.Context, subscriber string) error + + NumClients() int + NumClientSubscriptions(clientID string) int } type Subscription interface { @@ -58,6 +61,14 @@ func (b *EventBus) OnStop() { b.pubsub.Stop() } +func (b *EventBus) NumClients() int { + return b.pubsub.NumClients() +} + +func (b *EventBus) NumClientSubscriptions(clientID string) int { + return b.pubsub.NumClientSubscriptions(clientID) +} + func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (Subscription, error) { return b.pubsub.Subscribe(ctx, 
subscriber, query, outCapacity...) } From 303557203402f0a1b6db4a55cb2c6562765058b1 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 11 Mar 2019 22:52:09 +0400 Subject: [PATCH 15/41] cs: comment out log.Error to avoid TestReactorValidatorSetChanges timing out (#3401) --- consensus/state.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index d4a12a0c..74ec092f 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -671,8 +671,8 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { defer cs.mtx.Unlock() var ( - err error added bool + err error ) msg, peerID := mi.Msg, mi.PeerID switch msg := msg.(type) { @@ -714,11 +714,15 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { // the peer is sending us CatchupCommit precommits. // We could make note of this and help filter in broadcastHasVoteMessage(). default: - cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg)) + cs.Logger.Error("Unknown msg type", "type", reflect.TypeOf(msg)) + return } + if err != nil { - cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, - "peer", peerID, "err", err, "msg", msg) + // Causes TestReactorValidatorSetChanges to timeout + // https://github.com/tendermint/tendermint/issues/3406 + // cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, + // "peer", peerID, "err", err, "msg", msg) } } From 676212fa8fdd1ddf29e9c352129c97da3c113769 Mon Sep 17 00:00:00 2001 From: srmo Date: Mon, 11 Mar 2019 20:06:03 +0100 Subject: [PATCH 16/41] cmd: make sure to have 'testnet' create the data directory for nonvals (#3409) Fixes #3408 --- CHANGELOG_PENDING.md | 2 ++ cmd/tendermint/commands/testnet.go | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index a8998c99..c4136b55 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -3,6 +3,7 @@ ** Special thanks to external contributors on this release: +@srmo ### BREAKING 
CHANGES: @@ -45,3 +46,4 @@ Special thanks to external contributors on this release: - [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection - [libs/pubsub] \#951, \#1880 use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) - [p2p] \#3369 do not panic when filter times out +- [cmd] \#3408 Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index c3ef8619..e34b8d30 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -115,6 +115,12 @@ func testnetFiles(cmd *cobra.Command, args []string) error { return err } + err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } + initFilesWithConfig(config) } From ad3e990c6a424d939b181a2a7c911eb4592c0c79 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 11 Mar 2019 23:59:00 +0400 Subject: [PATCH 17/41] fix GO_VERSION in installation scripts (#3411) there is no such file https://storage.googleapis.com/golang/go1.12.0.linux-amd64.tar.gz Fixes #3405 --- scripts/install/install_tendermint_arm.sh | 2 +- scripts/install/install_tendermint_bsd.sh | 2 +- scripts/install/install_tendermint_ubuntu.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/install/install_tendermint_arm.sh b/scripts/install/install_tendermint_arm.sh index 703a736f..b260d8d0 100644 --- a/scripts/install/install_tendermint_arm.sh +++ b/scripts/install/install_tendermint_arm.sh @@ -5,7 +5,7 @@ REPO=github.com/tendermint/tendermint # change this to a specific release or branch BRANCH=master -GO_VERSION=1.12.0 +GO_VERSION=1.12 sudo apt-get update -y diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh index 
ebada72e..b76b9485 100644 --- a/scripts/install/install_tendermint_bsd.sh +++ b/scripts/install/install_tendermint_bsd.sh @@ -16,7 +16,7 @@ set BRANCH=master set REPO=github.com/tendermint/tendermint -set GO_VERSION=1.12.0 +set GO_VERSION=1.12 sudo pkg update diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh index 20e61129..3fe6ea8e 100644 --- a/scripts/install/install_tendermint_ubuntu.sh +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -13,7 +13,7 @@ REPO=github.com/tendermint/tendermint # change this to a specific release or branch BRANCH=master -GO_VERSION=1.12.0 +GO_VERSION=1.12 sudo apt-get update -y sudo apt-get install -y make From e42f833fd4ce6cb886dbf594dcb2f3af61918325 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 12 Mar 2019 16:20:59 +0400 Subject: [PATCH 18/41] Merge master back to develop (#3412) * libs/db: close batch (#3397) ClevelDB requires closing when WriteBatch is no longer needed, https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close Fixes the memory leak in https://github.com/cosmos/cosmos-sdk/issues/3842 * update changelog and bump version to 0.30.2 --- CHANGELOG.md | 19 +++++++++++++++++++ version/version.go | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42e8761a..44ecdf38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## v0.30.2 + +*March 10th, 2019* + +This release fixes a CLevelDB memory leak. It was happening because we were not +closing the WriteBatch object after use. See [levigo's +godoc](https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close) for the +Close method. Special thanks goes to @Stumble who both reported an issue in +[cosmos-sdk](https://github.com/cosmos/cosmos-sdk/issues/3842) and provided a +fix here. 
+ +### BREAKING CHANGES: + +* Go API +- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble) + +### BUG FIXES: +- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Fix CLevelDB memory leak (@Stumble) + ## v0.30.1 *February 20th, 2019* diff --git a/version/version.go b/version/version.go index 1f30978c..1b0a36ae 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.30.1" + TMCoreSemVer = "0.30.2" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.15.0" From 85c023db88a8517587f0e8499b1e89f0eea1112d Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Tue, 12 Mar 2019 20:07:26 +0100 Subject: [PATCH 19/41] Prep release v0.31.0: - update changelog, reset pending - bump versions - add external contributors (partly manually) --- CHANGELOG.md | 50 ++++++++++++++++++++++++++++++++++++++++++++ CHANGELOG_PENDING.md | 31 ++------------------------- version/version.go | 2 +- 3 files changed, 53 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44ecdf38..20e5a6fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,55 @@ # Changelog +## v0.31.0 + +*March 13th, 2019* + +Special thanks to external contributors on this release: +@danil-lashin, @guagualvcha, @jleni, @siburu, @silasdavis, @srmo, @Stumble, @svenstaro + +### BREAKING CHANGES: + +* CLI/RPC/Config +- [rpc/client] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) + +* Apps + +* Go API +- [libs/common] TrapSignal accepts logger as a first parameter and does not block anymore + * previously it was dumping "captured ..." 
msg to os.Stdout + * TrapSignal should not be responsible for blocking thread of execution + +* Blockchain Protocol + +* P2P Protocol + +### FEATURES: +- [mempool] [\#3079](https://github.com/tendermint/tendermint/issues/3079) bound mempool memory usage (`mempool.max_txs_bytes` is set to 1GB by default; see config.toml) + mempool's current `txs_total_bytes` is exposed via `total_bytes` field in + `/num_unconfirmed_txs` and `/unconfirmed_txs` RPC endpoints. +- [config] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Remove `consensus.blocktime_iota` parameter +- [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Add `time_iota_ms` to block's consensus parameters (not exposed to the application) +- [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Rename `consensus_params.block_size` to `consensus_params.block` +- [lite] add `/unsubscribe_all` endpoint, which allows you to unsubscribe from all events + +### IMPROVEMENTS: +- [libs/common] [\#3238](https://github.com/tendermint/tendermint/issues/3238) exit with zero (0) code upon receiving SIGTERM/SIGINT +- [libs/db] [\#3378](https://github.com/tendermint/tendermint/issues/3378) CLevelDB#Stats now returns the following properties: + - leveldb.num-files-at-level{n} + - leveldb.stats + - leveldb.sstables + - leveldb.blockpool + - leveldb.cachedblock + - leveldb.openedtables + - leveldb.alivesnaps + - leveldb.aliveiters + +### BUG FIXES: +- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection +- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) +- [p2p] [\#3369](https://github.com/tendermint/tendermint/issues/3369) do not panic when 
filter times out +- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) + ## v0.30.2 *March 10th, 2019* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c4136b55..470282aa 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,49 +1,22 @@ -## v0.31.0 +## v0.32.0 ** -Special thanks to external contributors on this release: -@srmo - ### BREAKING CHANGES: * CLI/RPC/Config -- [rpc/client] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) * Apps * Go API -- [libs/common] TrapSignal accepts logger as a first parameter and does not block anymore - * previously it was dumping "captured ..." msg to os.Stdout - * TrapSignal should not be responsible for blocking thread of execution * Blockchain Protocol * P2P Protocol ### FEATURES: -- [mempool] \#3079 bound mempool memory usage (`mempool.max_txs_bytes` is set to 1GB by default; see config.toml) - mempool's current `txs_total_bytes` is exposed via `total_bytes` field in - `/num_unconfirmed_txs` and `/unconfirmed_txs` RPC endpoints. 
-- [config] \#2920 Remove `consensus.blocktime_iota` parameter -- [genesis] \#2920 Add `time_iota_ms` to block's consensus parameters (not exposed to the application) -- [genesis] \#2920 Rename `consensus_params.block_size` to `consensus_params.block` -- [lite] add `/unsubscribe_all` endpoint, which allows you to unsubscribe from all events ### IMPROVEMENTS: -- [libs/common] \#3238 exit with zero (0) code upon receiving SIGTERM/SIGINT -- [libs/db] \#3378 CLevelDB#Stats now returns the following properties: - - leveldb.num-files-at-level{n} - - leveldb.stats - - leveldb.sstables - - leveldb.blockpool - - leveldb.cachedblock - - leveldb.openedtables - - leveldb.alivesnaps - - leveldb.aliveiters ### BUG FIXES: -- [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection -- [libs/pubsub] \#951, \#1880 use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) -- [p2p] \#3369 do not panic when filter times out -- [cmd] \#3408 Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) + diff --git a/version/version.go b/version/version.go index 1b0a36ae..83093958 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. 
// XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.30.2" + TMCoreSemVer = "0.31.0" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.15.0" From a59930a327065f2d091840586da6c35b6fd07472 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 13 Mar 2019 16:09:05 +0400 Subject: [PATCH 20/41] localnet: fix $LOG variable (#3423) Fixes #3421 Before: it was creating a file named ${LOG:-tendermint.log} in .build/nodeX After: it creates a file named tendermint.log --- docker-compose.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 61862e5c..ccc80204 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,7 +8,7 @@ services: - "26656-26657:26656-26657" environment: - ID=0 - - LOG=$${LOG:-tendermint.log} + - LOG=${LOG:-tendermint.log} volumes: - ./build:/tendermint:Z networks: @@ -22,7 +22,7 @@ services: - "26659-26660:26656-26657" environment: - ID=1 - - LOG=$${LOG:-tendermint.log} + - LOG=${LOG:-tendermint.log} volumes: - ./build:/tendermint:Z networks: @@ -34,7 +34,7 @@ services: image: "tendermint/localnode" environment: - ID=2 - - LOG=$${LOG:-tendermint.log} + - LOG=${LOG:-tendermint.log} ports: - "26661-26662:26656-26657" volumes: @@ -48,7 +48,7 @@ services: image: "tendermint/localnode" environment: - ID=3 - - LOG=$${LOG:-tendermint.log} + - LOG=${LOG:-tendermint.log} ports: - "26663-26664:26656-26657" volumes: From 745713330736c5c751450245d88b8037cbee3aa6 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 14 Mar 2019 15:00:58 +0400 Subject: [PATCH 21/41] grpcdb: close Iterator/ReverseIterator after use (#3424) Fixes #3402 --- CHANGELOG_PENDING.md | 1 + libs/db/remotedb/grpcdb/server.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c4136b55..e9d4a925 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -47,3 +47,4 @@ Special thanks to 
external contributors on this release: - [libs/pubsub] \#951, \#1880 use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) - [p2p] \#3369 do not panic when filter times out - [cmd] \#3408 Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) +- [libs/db/remotedb/grpcdb] \#3402 Close Iterator/ReverseIterator after use diff --git a/libs/db/remotedb/grpcdb/server.go b/libs/db/remotedb/grpcdb/server.go index bfe65e61..a032292b 100644 --- a/libs/db/remotedb/grpcdb/server.go +++ b/libs/db/remotedb/grpcdb/server.go @@ -138,6 +138,7 @@ func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Noth func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error { it := s.db.Iterator(query.Start, query.End) + defer it.Close() return s.handleIterator(it, dis.Send) } @@ -162,6 +163,7 @@ func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error { it := s.db.ReverseIterator(query.Start, query.End) + defer it.Close() return s.handleIterator(it, dis.Send) } From 5483ac6b0a2a75bc140487e8405392533a3f1687 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Thu, 14 Mar 2019 12:17:49 +0100 Subject: [PATCH 22/41] minor changes / fixes to release 0.31.0 (#3422) * bump ABCIVersion due to renaming BlockSizeParams -> BlockParams (https://github.com/tendermint/tendermint/pull/3417#discussion_r264974791) * Move changelog on consensus params entry to breaking * Add @melekes' suggestion for breaking change in pubsub into upgrading.md * Add changelog entry for #3351 * Add changelog entry for #3358 & #3359 * Add changelog entry for #3397 * remove changelog entry for #3397 (was already released in 0.30.2) * move 3351 to improvements * Update changelog comment --- CHANGELOG.md | 7 ++++++- UPGRADING.md | 18 
++++++++++++++++++ version/version.go | 2 +- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20e5a6fb..c73a0254 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,10 +14,13 @@ Special thanks to external contributors on this release: * Apps +- [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Rename `consensus_params.block_size` to `consensus_params.block` in ABCI ConsensusParams + * Go API - [libs/common] TrapSignal accepts logger as a first parameter and does not block anymore * previously it was dumping "captured ..." msg to os.Stdout * TrapSignal should not be responsible for blocking thread of execution +- [libs/db] [\#3397](https://github.com/tendermint/tendermint/pull/3397) Add possibility to `Close()` `Batch` to prevent memory leak when using ClevelDB. (@Stumble) * Blockchain Protocol @@ -29,7 +32,6 @@ Special thanks to external contributors on this release: `/num_unconfirmed_txs` and `/unconfirmed_txs` RPC endpoints. - [config] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Remove `consensus.blocktime_iota` parameter - [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Add `time_iota_ms` to block's consensus parameters (not exposed to the application) -- [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Rename `consensus_params.block_size` to `consensus_params.block` - [lite] add `/unsubscribe_all` endpoint, which allows you to unsubscribe from all events ### IMPROVEMENTS: @@ -43,12 +45,15 @@ Special thanks to external contributors on this release: - leveldb.openedtables - leveldb.alivesnaps - leveldb.aliveiters +- [privval] [\#3351](https://github.com/tendermint/tendermint/pull/3351) First part of larger refactoring that clarifies and separates concerns in the privval package. 
### BUG FIXES: - [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection - [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) - [p2p] [\#3369](https://github.com/tendermint/tendermint/issues/3369) do not panic when filter times out - [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) +- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha) +- [p2p] [\#3359](https://github.com/tendermint/tendermint/pull/3359) Fix reconnecting report duplicate ID error due to race condition between adding peer to peerSet and starting it (@guagualvcha) ## v0.30.2 diff --git a/UPGRADING.md b/UPGRADING.md index f3fecb5e..ae1c8889 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -3,6 +3,24 @@ This guide provides steps to be followed when you upgrade your applications to a newer version of Tendermint Core. +## v0.31.0 + +Since the pubsub no longer blocks on sending, some WS clients might stop working as expected. +If your WS client is not consuming events fast enough, Tendermint can terminate the subscription. 
+In this case, the WS client will receive an error with description: + +```json +{ + "jsonrpc": "2.0", + "id": "{ID}#event", + "error": { + "code": -32000, + "msg": "Server error", + "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)" + } +} +``` + ## v0.30.0 This release contains a breaking change to both the block and p2p protocols, diff --git a/version/version.go b/version/version.go index 83093958..b2202206 100644 --- a/version/version.go +++ b/version/version.go @@ -23,7 +23,7 @@ const ( TMCoreSemVer = "0.31.0" // ABCISemVer is the semantic version of the ABCI library - ABCISemVer = "0.15.0" + ABCISemVer = "0.16.0" ABCIVersion = ABCISemVer ) From 52c4e15eb25e8a3d6e77f90239710de3655d4ec6 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Thu, 14 Mar 2019 16:07:06 +0100 Subject: [PATCH 23/41] changelog: more review fixes/release/v0.31.0 (#3427) * Update release summary * Add pubsub config changes * Add link to issue for pubsub changes --- CHANGELOG.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c73a0254..4c41abab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,10 +7,19 @@ Special thanks to external contributors on this release: @danil-lashin, @guagualvcha, @jleni, @siburu, @silasdavis, @srmo, @Stumble, @svenstaro +This release brings pubsub 2.0, limits the mempool size to 1GB (max_txs_bytes) and number of `/subscribe` WebSocket +clients (`max_subscription_clients`) and adds `/unsubscribe_all` endpoint to the lite client. +It also contains many smaller improvements and bug-fixes. +Pubsub 2.0 is an improved version of the older pubsub, which is a) non-blocking b) has nicer API. +Note our HttpClient's interface got updated to reflect the pubsub changes and now also has a better API for WebSocket subscriptions. 
+ ### BREAKING CHANGES: * CLI/RPC/Config - [rpc/client] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) +- [config] [\#2826](https://github.com/tendermint/tendermint/issues/2826) Add `rpc.max_subscription_clients` config parameter to control how many unique clientIDs can `/subscribe` at the same time +- [config] [\#2826](https://github.com/tendermint/tendermint/issues/2826) Add `rpc.max_subscriptions_per_client` config parameter to control how many unique queries a given client can `/subscribe` to +- [config] [\#2826](https://github.com/tendermint/tendermint/issues/2826) Add `rpc.max_subscription_clients` config parameter allowing you to change time to wait for a tx to be committed during `/broadcast_tx_commit` * Apps From 551b6322f5a74f578a8487001e805e4e1da6394d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 16 Mar 2019 19:24:12 -0400 Subject: [PATCH 24/41] Update v0.31.0 release notes (#3434) * changelog: fix formatting * update release notes * update changelog * linkify * update UPGRADING --- CHANGELOG.md | 78 ++++++++++++++++++++++++++++++++++------------------ UPGRADING.md | 27 ++++++++++++++++-- 2 files changed, 76 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c41abab..11487947 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,48 +2,70 @@ ## v0.31.0 -*March 13th, 2019* +*March 16th, 2019* Special thanks to external contributors on this release: -@danil-lashin, @guagualvcha, @jleni, @siburu, @silasdavis, @srmo, @Stumble, @svenstaro +@danil-lashin, @guagualvcha, @siburu, @silasdavis, @srmo, @Stumble, @svenstaro -This release brings pubsub 2.0, limits the mempool size to 1GB (max_txs_bytes) and number of `/subscribe` WebSocket -clients (`max_subscription_clients`) and adds `/unsubscribe_all` endpoint to the lite client. -It also contains many smaller improvements and bug-fixes. 
-Pubsub 2.0 is an improved version of the older pubsub, which is a) non-blocking b) has nicer API. -Note our HttpClient's interface got updated to reflect the pubsub changes and now also has a better API for WebSocket subscriptions. +This release is primarily about the new pubsub implementation, dubbed `pubsub 2.0`, and related changes, +like configurable limits on the number of active RPC subscriptions at a time (`max_subscription_clients`). +Pubsub 2.0 is an improved version of the older pubsub that is non-blocking and has a nicer API. +Note the improved pubsub API also resulted in some improvements to the HTTPClient interface and the API for WebSocket subscriptions. +This release also adds a configurable limit to the mempool size, `max_txs_bytes`, with +default 1GB, and includes many smaller improvements and bug-fixes. + +See the [v0.31.0 +Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for +more details. + +Friendly reminder, we have a [bug bounty +program](https://hackerone.com/tendermint). 
### BREAKING CHANGES: * CLI/RPC/Config -- [rpc/client] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) -- [config] [\#2826](https://github.com/tendermint/tendermint/issues/2826) Add `rpc.max_subscription_clients` config parameter to control how many unique clientIDs can `/subscribe` at the same time -- [config] [\#2826](https://github.com/tendermint/tendermint/issues/2826) Add `rpc.max_subscriptions_per_client` config parameter to control how many unique queries a given client can `/subscribe` to -- [config] [\#2826](https://github.com/tendermint/tendermint/issues/2826) Add `rpc.max_subscription_clients` config parameter allowing you to change time to wait for a tx to be committed during `/broadcast_tx_commit` + - [config] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Remove `consensus.blocktime_iota` parameter + - [rpc] [\#3227](https://github.com/tendermint/tendermint/issues/3227) New PubSub design does not block on clients when publishing + messages. Slow clients may miss messages and receive an error, terminating + the subscription. + - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients` + - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`. + - [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods. 
* Apps - -- [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Rename `consensus_params.block_size` to `consensus_params.block` in ABCI ConsensusParams + - [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a + ConsensusParam but need not be exposed to the app for now. + - [abci] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Rename `consensus_params.block_size` to `consensus_params.block` in ABCI ConsensusParams * Go API -- [libs/common] TrapSignal accepts logger as a first parameter and does not block anymore - * previously it was dumping "captured ..." msg to os.Stdout - * TrapSignal should not be responsible for blocking thread of execution -- [libs/db] [\#3397](https://github.com/tendermint/tendermint/pull/3397) Add possibility to `Close()` `Batch` to prevent memory leak when using ClevelDB. (@Stumble) + - [libs/common] TrapSignal accepts logger as a first parameter and does not block anymore + * previously it was dumping "captured ..." msg to os.Stdout + * TrapSignal should not be responsible for blocking thread of execution + - [libs/db] [\#3397](https://github.com/tendermint/tendermint/pull/3397) Add possibility to `Close()` `Batch` to prevent memory leak when using ClevelDB. 
(@Stumble) + - [types] [\#3354](https://github.com/tendermint/tendermint/issues/3354) Remove RoundState from EventDataRoundState * Blockchain Protocol * P2P Protocol ### FEATURES: -- [mempool] [\#3079](https://github.com/tendermint/tendermint/issues/3079) bound mempool memory usage (`mempool.max_txs_bytes` is set to 1GB by default; see config.toml) - mempool's current `txs_total_bytes` is exposed via `total_bytes` field in +- [config] [\#3269](https://github.com/tendermint/tendermint/issues/2826) New configuration values for controlling RPC subscriptions: + - `rpc.max_subscription_clients` sets the maximum number of unique clients + with open subscriptions + - `rpc.max_subscriptions_per_client`sets the maximum number of unique + subscriptions from a given client + - `rpc.timeout_broadcast_tx_commit` sets the time to wait for a tx to be committed during `/broadcast_tx_commit` +- [types] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Add `time_iota_ms` to block's consensus parameters (not exposed to the application) +- [lite] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Add `/unsubscribe_all` endpoint to unsubscribe from all events +- [mempool] [\#3079](https://github.com/tendermint/tendermint/issues/3079) Bound mempool memory usage via the `mempool.max_txs_bytes` configuration value. Set to 1GB by default. The mempool's current `txs_total_bytes` is exposed via `total_bytes` field in `/num_unconfirmed_txs` and `/unconfirmed_txs` RPC endpoints. 
-- [config] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Remove `consensus.blocktime_iota` parameter -- [genesis] [\#2920](https://github.com/tendermint/tendermint/issues/2920) Add `time_iota_ms` to block's consensus parameters (not exposed to the application) -- [lite] add `/unsubscribe_all` endpoint, which allows you to unsubscribe from all events ### IMPROVEMENTS: +- [all] [\#3385](https://github.com/tendermint/tendermint/issues/3385), [\#3386](https://github.com/tendermint/tendermint/issues/3386) Various linting improvements +- [crypto] [\#3371](https://github.com/tendermint/tendermint/issues/3371) Copy in secp256k1 package from go-ethereum instead of importing + go-ethereum (@silasdavis) +- [deps] [\#3382](https://github.com/tendermint/tendermint/issues/3382) Don't pin repos without releases +- [deps] [\#3357](https://github.com/tendermint/tendermint/issues/3357), [\#3389](https://github.com/tendermint/tendermint/issues/3389), [\#3392](https://github.com/tendermint/tendermint/issues/3392) Update gogo/protobuf, golang/protobuf, levigo, golang.org/x/crypto - [libs/common] [\#3238](https://github.com/tendermint/tendermint/issues/3238) exit with zero (0) code upon receiving SIGTERM/SIGINT - [libs/db] [\#3378](https://github.com/tendermint/tendermint/issues/3378) CLevelDB#Stats now returns the following properties: - leveldb.num-files-at-level{n} @@ -57,11 +79,13 @@ Note our HttpClient's interface got updated to reflect the pubsub changes and no - [privval] [\#3351](https://github.com/tendermint/tendermint/pull/3351) First part of larger refactoring that clarifies and separates concerns in the privval package. 
### BUG FIXES: -- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection -- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) -- [p2p] [\#3369](https://github.com/tendermint/tendermint/issues/3369) do not panic when filter times out -- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) - [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha) +- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) +- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) +- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints + (@guagualvcha) +- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection +- [p2p] [\#3369](https://github.com/tendermint/tendermint/issues/3369) Do not panic when filter times out - [p2p] [\#3359](https://github.com/tendermint/tendermint/pull/3359) Fix reconnecting report duplicate ID error due to race condition between adding peer to peerSet and starting it (@guagualvcha) ## v0.30.2 @@ -78,7 +102,7 @@ fix here. 
### BREAKING CHANGES: * Go API -- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble) + - [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble) ### BUG FIXES: - [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Fix CLevelDB memory leak (@Stumble) diff --git a/UPGRADING.md b/UPGRADING.md index ae1c8889..eccb954d 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -5,8 +5,15 @@ a newer version of Tendermint Core. ## v0.31.0 -Since the pubsub no longer blocks on sending, some WS clients might stop working as expected. -If your WS client is not consuming events fast enough, Tendermint can terminate the subscription. +This release contains a breaking change to the behaviour of the pubsub system. +It also contains some minor breaking changes in the Go API and ABCI. +There are no changes to the block or p2p protocols, so v0.31.0 should work fine +with blockchains created from the v0.30 series. + +### RPC + +The pubsub no longer blocks on publishing. This may cause some WebSocket (WS) clients to stop working as expected. +If your WS client is not consuming events fast enough, Tendermint can terminate the subscription. In this case, the WS client will receive an error with description: ```json @@ -19,8 +26,24 @@ In this case, the WS client will receive an error with description: "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)" } } + +Additionally, there are now limits on the number of subscribers and +subscriptions that can be active at once. See the new +`rpc.max_subscription_clients` and `rpc.max_subscriptions_per_client` values to +configure this. ``` +### Applications + +Simple rename of `ConsensusParams.BlockSize` to `ConsensusParams.Block`. + +The `ConsensusParams.Block.TimeIotaMS` field was also removed. 
It's configured +in the ConsensusParsm in genesis. + +### Go API + +See the [CHANGELOG](CHANGELOG.md). These are relatively straight forward. + ## v0.30.0 This release contains a breaking change to both the block and p2p protocols, From 4162ebe8b586deccc0e7476d8abafb75138bfe58 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 19 Mar 2019 11:38:32 +0400 Subject: [PATCH 25/41] types: refactor PB2TM.ConsensusParams to take BlockTimeIota as an arg (#3442) See https://github.com/tendermint/tendermint/pull/3403/files#r266208947 In #3403 we unexposed BlockTimeIota from the ABCI, but it's still part of the ConsensusParams struct, so we have to remember to add it back after calling PB2TM.ConsensusParams. Instead, PB2TM.ConsensusParams should take it as an argument Fixes #3432 --- consensus/replay.go | 7 +------ types/protobuf.go | 9 ++++++--- types/protobuf_test.go | 4 +--- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/consensus/replay.go b/consensus/replay.go index 6656da62..c8ab8a33 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -324,12 +324,7 @@ func (h *Handshaker) ReplayBlocks( } if res.ConsensusParams != nil { - // Preserve TimeIotaMs since it's not exposed to the application. - timeIotaMs := state.ConsensusParams.Block.TimeIotaMs - { - state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams) - } - state.ConsensusParams.Block.TimeIotaMs = timeIotaMs + state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams, state.ConsensusParams.Block.TimeIotaMs) } sm.SaveState(h.stateDB, state) } diff --git a/types/protobuf.go b/types/protobuf.go index 8cad4608..e10b9186 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -221,7 +221,9 @@ func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) return tmVals, nil } -func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams { +// BlockParams.TimeIotaMs is not exposed to the application. 
Therefore a caller +// must provide it. +func (pb2tm) ConsensusParams(csp *abci.ConsensusParams, blockTimeIotaMs int64) ConsensusParams { params := ConsensusParams{ Block: BlockParams{}, Evidence: EvidenceParams{}, @@ -231,8 +233,9 @@ func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams { // we must defensively consider any structs may be nil if csp.Block != nil { params.Block = BlockParams{ - MaxBytes: csp.Block.MaxBytes, - MaxGas: csp.Block.MaxGas, + MaxBytes: csp.Block.MaxBytes, + MaxGas: csp.Block.MaxGas, + TimeIotaMs: blockTimeIotaMs, } } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 2e29a502..152c92d1 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -64,9 +64,7 @@ func TestABCIValidators(t *testing.T) { func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() abciCP := TM2PB.ConsensusParams(cp) - cp2 := PB2TM.ConsensusParams(abciCP) - // TimeIotaMs is not exposed to the application. - cp2.Block.TimeIotaMs = cp.Block.TimeIotaMs + cp2 := PB2TM.ConsensusParams(abciCP, cp.Block.TimeIotaMs) assert.Equal(t, *cp, cp2) } From 8e62a3d62a5b04a5a7126d00d75925c990aec9c6 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Tue, 19 Mar 2019 12:19:02 +0100 Subject: [PATCH 26/41] Add #3421 to changelog and reorder alphabetically --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f714818..24955b08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,13 +81,14 @@ program](https://hackerone.com/tendermint). 
### BUG FIXES: - [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha) - [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo) +- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use - [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md) - [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints (@guagualvcha) - [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection - [p2p] [\#3369](https://github.com/tendermint/tendermint/issues/3369) Do not panic when filter times out - [p2p] [\#3359](https://github.com/tendermint/tendermint/pull/3359) Fix reconnecting report duplicate ID error due to race condition between adding peer to peerSet and starting it (@guagualvcha) -- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use +- [test] [\#3421](https://github.com/tendermint/tendermint/issues/3421) Fix logfile names created by localnet via docker-compose.yml ## v0.30.2 From e276f35f86b3980e088a95aa70c96dbddcdf658b Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Tue, 19 Mar 2019 14:36:42 +0100 Subject: [PATCH 27/41] remove 3421 from changelog --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24955b08..8968a7a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,7 +88,6 @@ 
program](https://hackerone.com/tendermint). - [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection - [p2p] [\#3369](https://github.com/tendermint/tendermint/issues/3369) Do not panic when filter times out - [p2p] [\#3359](https://github.com/tendermint/tendermint/pull/3359) Fix reconnecting report duplicate ID error due to race condition between adding peer to peerSet and starting it (@guagualvcha) -- [test] [\#3421](https://github.com/tendermint/tendermint/issues/3421) Fix logfile names created by localnet via docker-compose.yml ## v0.30.2 From 1e3469789dce5a034a21b6e48288f1809a102595 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Wed, 20 Mar 2019 00:45:51 +0100 Subject: [PATCH 28/41] Ensure WriteTimeout > TimeoutBroadcastTxCommit (#3443) * Make sure config.TimeoutBroadcastTxCommit < rpcserver.WriteTimeout() * remove redundant comment * libs/rpc/http_server: move Read/WriteTimeout into Config * increase defaults for read/write timeouts Based on this article https://www.digitalocean.com/community/tutorials/how-to-optimize-nginx-configuration * WriteTimeout should be larger than TimeoutBroadcastTxCommit * set a deadline for subscribing to txs * extract duration into const * add two changelog entries * Update CHANGELOG_PENDING.md Co-Authored-By: melekes * Update CHANGELOG_PENDING.md Co-Authored-By: melekes * 12 -> 10 * changelog * changelog --- CHANGELOG.md | 6 +++-- config/config.go | 7 +++-- config/toml.go | 3 +++ docs/tendermint-core/configuration.md | 3 +++ lite/proxy/proxy.go | 6 +++-- node/node.go | 17 +++++++++--- rpc/core/events.go | 3 +-- rpc/core/mempool.go | 2 +- rpc/core/pipe.go | 9 ++++--- rpc/lib/rpc_test.go | 9 ++++--- rpc/lib/server/handlers.go | 18 +++++++++++++ rpc/lib/server/http_server.go | 37 ++++++++++++++++----------- rpc/lib/server/http_server_test.go | 12 ++++++--- rpc/lib/test/main.go | 5 ++-- rpc/lib/types/types.go | 27 +++++++++++++++++-- 
tools/tm-monitor/rpc.go | 5 ++-- 16 files changed, 123 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8968a7a7..f0ba675a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,8 @@ This release is primarily about the new pubsub implementation, dubbed `pubsub 2. like configurable limits on the number of active RPC subscriptions at a time (`max_subscription_clients`). Pubsub 2.0 is an improved version of the older pubsub that is non-blocking and has a nicer API. Note the improved pubsub API also resulted in some improvements to the HTTPClient interface and the API for WebSocket subscriptions. -This release also adds a configurable limit to the mempool size, `max_txs_bytes`, with -default 1GB, and includes many smaller improvements and bug-fixes. +This release also adds a configurable limit to the mempool size (`max_txs_bytes`, default 1GB) +and a configurable timeout for the `/broadcast_tx_commit` endpoint. See the [v0.31.0 Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for @@ -30,6 +30,7 @@ program](https://hackerone.com/tendermint). the subscription. - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients` - [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`. + - [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config. - [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). 
This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods. * Apps @@ -43,6 +44,7 @@ program](https://hackerone.com/tendermint). * TrapSignal should not be responsible for blocking thread of execution - [libs/db] [\#3397](https://github.com/tendermint/tendermint/pull/3397) Add possibility to `Close()` `Batch` to prevent memory leak when using ClevelDB. (@Stumble) - [types] [\#3354](https://github.com/tendermint/tendermint/issues/3354) Remove RoundState from EventDataRoundState + - [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) `StartHTTPServer` / `StartHTTPAndTLSServer` now require a Config (use `rpcserver.DefaultConfig`) * Blockchain Protocol diff --git a/config/config.go b/config/config.go index 540012a5..8342921a 100644 --- a/config/config.go +++ b/config/config.go @@ -7,7 +7,6 @@ import ( "time" "github.com/pkg/errors" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" ) const ( @@ -336,6 +335,9 @@ type RPCConfig struct { MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"` // How long to wait for a tx to be committed during /broadcast_tx_commit + // WARNING: Using a value larger than 10s will result in increasing the + // global HTTP write timeout, which applies to all connections and endpoints. 
+ // See https://github.com/tendermint/tendermint/issues/3435 TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"` } @@ -385,9 +387,6 @@ func (cfg *RPCConfig) ValidateBasic() error { if cfg.TimeoutBroadcastTxCommit < 0 { return errors.New("timeout_broadcast_tx_commit can't be negative") } - if cfg.TimeoutBroadcastTxCommit > rpcserver.WriteTimeout { - return fmt.Errorf("timeout_broadcast_tx_commit can't be greater than rpc server's write timeout: %v", rpcserver.WriteTimeout) - } return nil } diff --git a/config/toml.go b/config/toml.go index 9ce7e76c..a0b651d9 100644 --- a/config/toml.go +++ b/config/toml.go @@ -176,6 +176,9 @@ max_subscription_clients = {{ .RPC.MaxSubscriptionClients }} max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} # How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" ##### peer to peer configuration options ##### diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index f1ac753a..aa275c7a 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -122,6 +122,9 @@ max_subscription_clients = 100 max_subscriptions_per_client = 5 # How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. 
+# See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "10s" ##### peer to peer configuration options ##### diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 020e5753..d3c16d4a 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -45,11 +45,13 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpe core.SetLogger(logger) mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) - l, err := rpcserver.Listen(listenAddr, rpcserver.Config{MaxOpenConnections: maxOpenConnections}) + config := rpcserver.DefaultConfig() + config.MaxOpenConnections = maxOpenConnections + l, err := rpcserver.Listen(listenAddr, config) if err != nil { return err } - return rpcserver.StartHTTPServer(l, mux, logger) + return rpcserver.StartHTTPServer(l, mux, logger, config) } // RPCRoutes just routes everything to the given client, as if it were diff --git a/node/node.go b/node/node.go index f3f9dca3..8f71fa31 100644 --- a/node/node.go +++ b/node/node.go @@ -689,9 +689,18 @@ func (n *Node) startRPC() ([]net.Listener, error) { mux.HandleFunc("/websocket", wm.WebsocketHandler) rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger) + config := rpcserver.DefaultConfig() + config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. 
+ // See https://github.com/tendermint/tendermint/issues/3435 + if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + listener, err := rpcserver.Listen( listenAddr, - rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections}, + config, ) if err != nil { return nil, err @@ -711,6 +720,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { listener, rootHandler, rpcLogger, + config, ) listeners[i] = listener } @@ -718,8 +728,9 @@ func (n *Node) startRPC() ([]net.Listener, error) { // we expose a simplified api over grpc for convenience to app devs grpcListenAddr := n.config.RPC.GRPCListenAddress if grpcListenAddr != "" { - listener, err := rpcserver.Listen( - grpcListenAddr, rpcserver.Config{MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections}) + config := rpcserver.DefaultConfig() + config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + listener, err := rpcserver.Listen(grpcListenAddr, config) if err != nil { return nil, err } diff --git a/rpc/core/events.go b/rpc/core/events.go index 3ea33fa8..6bc5ecc7 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -105,8 +105,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er if err != nil { return nil, errors.Wrap(err, "failed to parse query") } - - subCtx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() sub, err := eventBus.Subscribe(subCtx, addr, q) if err != nil { diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 6ebdbcfc..967466e7 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -197,7 +197,7 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc } // Subscribe to tx being committed in block. 
- subCtx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() q := types.EventQueryTxFor(tx) deliverTxSub, err := eventBus.Subscribe(subCtx, subscriber, q) diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 0b760344..ad8afdef 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -1,6 +1,8 @@ package core import ( + "time" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" @@ -9,7 +11,6 @@ import ( mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" @@ -19,9 +20,11 @@ const ( // see README defaultPerPage = 30 maxPerPage = 100 -) -var subscribeTimeout = rpcserver.WriteTimeout / 2 + // SubscribeTimeout is the maximum time we wait to subscribe for an event. 
+ // must be less than the server's write timeout (see rpcserver.DefaultConfig) + SubscribeTimeout = 5 * time.Second +) //---------------------------------------------- // These interfaces are used by RPC and must be thread safe diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 68c134a7..3fa4de47 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -121,11 +121,12 @@ func setup() { wm := server.NewWebsocketManager(Routes, RoutesCdc, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) wm.SetLogger(tcpLogger) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - listener1, err := server.Listen(tcpAddr, server.Config{}) + config := server.DefaultConfig() + listener1, err := server.Listen(tcpAddr, config) if err != nil { panic(err) } - go server.StartHTTPServer(listener1, mux, tcpLogger) + go server.StartHTTPServer(listener1, mux, tcpLogger, config) unixLogger := logger.With("socket", "unix") mux2 := http.NewServeMux() @@ -133,11 +134,11 @@ func setup() { wm = server.NewWebsocketManager(Routes, RoutesCdc) wm.SetLogger(unixLogger) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - listener2, err := server.Listen(unixAddr, server.Config{}) + listener2, err := server.Listen(unixAddr, config) if err != nil { panic(err) } - go server.StartHTTPServer(listener2, mux2, unixLogger) + go server.StartHTTPServer(listener2, mux2, unixLogger, config) // wait for servers to start time.Sleep(time.Second * 2) diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 36ea47da..6391b009 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -2,6 +2,7 @@ package rpcserver import ( "bytes" + "context" "encoding/hex" "encoding/json" "fmt" @@ -439,6 +440,9 @@ type wsConnection struct { // callback which is called upon disconnect onDisconnect func(remoteAddr string) + + ctx context.Context + cancel context.CancelFunc } // NewWSConnection wraps websocket.Conn. 
@@ -532,6 +536,10 @@ func (wsc *wsConnection) OnStop() { if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) } + + if wsc.ctx != nil { + wsc.cancel() + } } // GetRemoteAddr returns the remote address of the underlying connection. @@ -569,6 +577,16 @@ func (wsc *wsConnection) Codec() *amino.Codec { return wsc.cdc } +// Context returns the connection's context. +// The context is canceled when the client's connection closes. +func (wsc *wsConnection) Context() context.Context { + if wsc.ctx != nil { + return wsc.ctx + } + wsc.ctx, wsc.cancel = context.WithCancel(context.Background()) + return wsc.ctx +} + // Read from the socket and subscribe to or unsubscribe from events func (wsc *wsConnection) readRoutine() { defer func() { diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 9db69b6f..c4bb6fa1 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -18,9 +18,23 @@ import ( types "github.com/tendermint/tendermint/rpc/lib/types" ) -// Config is an RPC server configuration. +// Config is a RPC server configuration. type Config struct { + // see netutil.LimitListener MaxOpenConnections int + // mirrors http.Server#ReadTimeout + ReadTimeout time.Duration + // mirrors http.Server#WriteTimeout + WriteTimeout time.Duration +} + +// DefaultConfig returns a default configuration. +func DefaultConfig() *Config { + return &Config{ + MaxOpenConnections: 0, // unlimited + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + } } const ( @@ -30,25 +44,17 @@ const ( // same as the net/http default maxHeaderBytes = 1 << 20 - - // Timeouts for reading/writing to the http connection. - // Public so handlers can read them - - // /broadcast_tx_commit has it's own timeout, which should - // be less than the WriteTimeout here. - // TODO: use a config instead. - ReadTimeout = 3 * time.Second - WriteTimeout = 20 * time.Second ) // StartHTTPServer takes a listener and starts an HTTP server with the given handler. 
// It wraps handler with RecoverAndLogHandler. // NOTE: This function blocks - you may want to call it in a go-routine. -func StartHTTPServer(listener net.Listener, handler http.Handler, logger log.Logger) error { +func StartHTTPServer(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr())) s := &http.Server{ Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), - ReadTimeout: ReadTimeout, - WriteTimeout: WriteTimeout, + ReadTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, MaxHeaderBytes: maxHeaderBytes, } err := s.Serve(listener) @@ -64,13 +70,14 @@ func StartHTTPAndTLSServer( handler http.Handler, certFile, keyFile string, logger log.Logger, + config *Config, ) error { logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listener.Addr(), certFile, keyFile)) s := &http.Server{ Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), - ReadTimeout: ReadTimeout, - WriteTimeout: WriteTimeout, + ReadTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, MaxHeaderBytes: maxHeaderBytes, } err := s.ServeTLS(listener, certFile, keyFile) @@ -180,7 +187,7 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Listen starts a new net.Listener on the given address. // It returns an error if the address is invalid or the call to Listen() fails. 
-func Listen(addr string, config Config) (listener net.Listener, err error) { +func Listen(addr string, config *Config) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { return nil, errors.Errorf( diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go index 6b852afa..7f47a30b 100644 --- a/rpc/lib/server/http_server_test.go +++ b/rpc/lib/server/http_server_test.go @@ -30,10 +30,12 @@ func TestMaxOpenConnections(t *testing.T) { time.Sleep(10 * time.Millisecond) fmt.Fprint(w, "some body") }) - l, err := Listen("tcp://127.0.0.1:0", Config{MaxOpenConnections: max}) + config := DefaultConfig() + config.MaxOpenConnections = max + l, err := Listen("tcp://127.0.0.1:0", config) require.NoError(t, err) defer l.Close() - go StartHTTPServer(l, mux, log.TestingLogger()) + go StartHTTPServer(l, mux, log.TestingLogger(), config) // Make N GET calls to the server. attempts := max * 2 @@ -64,15 +66,17 @@ func TestMaxOpenConnections(t *testing.T) { } func TestStartHTTPAndTLSServer(t *testing.T) { + config := DefaultConfig() + config.MaxOpenConnections = 1 // set up fixtures listenerAddr := "tcp://0.0.0.0:0" - listener, err := Listen(listenerAddr, Config{MaxOpenConnections: 1}) + listener, err := Listen(listenerAddr, config) require.NoError(t, err) mux := http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {}) // test failure - err = StartHTTPAndTLSServer(listener, mux, "", "", log.TestingLogger()) + err = StartHTTPAndTLSServer(listener, mux, "", "", log.TestingLogger(), config) require.IsType(t, (*os.PathError)(nil), err) // TODO: test that starting the server can actually work diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go index 3afc1ac1..2e433b90 100644 --- a/rpc/lib/test/main.go +++ b/rpc/lib/test/main.go @@ -36,9 +36,10 @@ func main() { cmn.TrapSignal(logger, func() {}) rpcserver.RegisterRPCFuncs(mux, routes, cdc, logger) - listener, err := 
rpcserver.Listen("0.0.0.0:8008", rpcserver.Config{}) + config := rpcserver.DefaultConfig() + listener, err := rpcserver.Listen("0.0.0.0:8008", config) if err != nil { cmn.Exit(err.Error()) } - rpcserver.StartHTTPServer(listener, mux, logger) + rpcserver.StartHTTPServer(listener, mux, logger, config) } diff --git a/rpc/lib/types/types.go b/rpc/lib/types/types.go index 21623e41..14317d43 100644 --- a/rpc/lib/types/types.go +++ b/rpc/lib/types/types.go @@ -1,6 +1,7 @@ package rpctypes import ( + "context" "encoding/json" "fmt" "net/http" @@ -243,6 +244,8 @@ type WSRPCConnection interface { TryWriteRPCResponse(resp RPCResponse) bool // Codec returns an Amino codec used. Codec() *amino.Codec + // Context returns the connection's context. + Context() context.Context } // Context is the first parameter for all functions. It carries a json-rpc @@ -260,8 +263,12 @@ type Context struct { HTTPReq *http.Request } -// RemoteAddr returns either HTTPReq#RemoteAddr or result of the -// WSConn#GetRemoteAddr(). +// RemoteAddr returns the remote address (usually a string "IP:port"). +// If neither HTTPReq nor WSConn is set, an empty string is returned. +// HTTP: +// http.Request#RemoteAddr +// WS: +// result of GetRemoteAddr func (ctx *Context) RemoteAddr() string { if ctx.HTTPReq != nil { return ctx.HTTPReq.RemoteAddr @@ -271,6 +278,22 @@ func (ctx *Context) RemoteAddr() string { return "" } +// Context returns the request's context. +// The returned context is always non-nil; it defaults to the background context. +// HTTP: +// The context is canceled when the client's connection closes, the request +// is canceled (with HTTP/2), or when the ServeHTTP method returns. +// WS: +// The context is canceled when the client's connections closes. 
+func (ctx *Context) Context() context.Context { + if ctx.HTTPReq != nil { + return ctx.HTTPReq.Context() + } else if ctx.WSConn != nil { + return ctx.WSConn.Context() + } + return context.Background() +} + //---------------------------------------- // SOCKETS diff --git a/tools/tm-monitor/rpc.go b/tools/tm-monitor/rpc.go index 1a08a9ec..4412e6e0 100644 --- a/tools/tm-monitor/rpc.go +++ b/tools/tm-monitor/rpc.go @@ -17,11 +17,12 @@ func startRPC(listenAddr string, m *monitor.Monitor, logger log.Logger) net.List wm := rpc.NewWebsocketManager(routes, nil) mux.HandleFunc("/websocket", wm.WebsocketHandler) rpc.RegisterRPCFuncs(mux, routes, cdc, logger) - listener, err := rpc.Listen(listenAddr, rpc.Config{}) + config := rpc.DefaultConfig() + listener, err := rpc.Listen(listenAddr, config) if err != nil { panic(err) } - go rpc.StartHTTPServer(listener, mux, logger) + go rpc.StartHTTPServer(listener, mux, logger, config) return listener } From a6349f50633a45755041b8230a0f7eb990383cfd Mon Sep 17 00:00:00 2001 From: Anca Zamfir Date: Wed, 20 Mar 2019 01:56:13 +0200 Subject: [PATCH 29/41] Formalize proposer election algorithm properties (#3140) * Update proposer-selection.md * Fixed typos * fixed typos * Attempt to address some comments * Update proposer-selection.md * Update proposer-selection.md * Update proposer-selection.md Added the normalization step. 
* Addressed review comments * New example for normalization section Added a new example to better show the need for normalization Added requirement for changing validator set Addressed review comments * Fixed problem with R2 * fixed the math for new validator * test * more small updates * Moved the centering above the round-robin election - the centering is now done before the actual round-robin block - updated examples - cleanup * change to reflect new implementation for new validator --- .../reactors/consensus/proposer-selection.md | 305 ++++++++++++++++-- 1 file changed, 275 insertions(+), 30 deletions(-) diff --git a/docs/spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md index b5e0b35a..6cb596ec 100644 --- a/docs/spec/reactors/consensus/proposer-selection.md +++ b/docs/spec/reactors/consensus/proposer-selection.md @@ -2,45 +2,290 @@ This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer. As Tendermint is “leader-based protocol”, the proposer selection is critical for its correct functioning. -Let denote with `proposer_p(h,r)` a process returned by the Proposer Selection Procedure at the process p, at height h -and round r. Then the Proposer Selection procedure should fulfill the following properties: -`Agreement`: Given a validator set V, and two honest validators, -p and q, for each height h, and each round r, -proposer_p(h,r) = proposer_q(h,r) +At a given block height, the proposer selection algorithm runs with the same validator set at each round . +Between heights, an updated validator set may be specified by the application as part of the ABCIResponses' EndBlock. -`Liveness`: In every consecutive sequence of rounds of size K (K is system parameter), at least a -single round has an honest proposer. 
+## Requirements for Proposer Selection -`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more -voting power is selected more frequently, proportional to its power. More precisely, given a set of processes -with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds -equal to its voting power. +This sections covers the requirements with Rx being mandatory and Ox optional requirements. +The following requirements must be met by the Proposer Selection procedure: -We now look at a few particular cases to understand better how fairness should be implemented. -If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r, -we have the following sequence of proposer selections in the following rounds: +#### R1: Determinism +Given a validator set `V`, and two honest validators `p` and `q`, for each height `h` and each round `r` the following must hold: -`p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, etc` + `proposer_p(h,r) = proposer_q(h,r)` -Let consider now the following scenario where a total voting power of faulty processes is aggregated in a single process -p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1). -In this case the sequence of proposer selections looks like this: +where `proposer_p(h,r)` is the proposer returned by the Proposer Selection Procedure at process `p`, at height `h` and round `r`. -`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc` +#### R2: Fairness +Given a validator set with total voting power P and a sequence S of elections. In any sub-sequence of S with length C*P, a validator v must be elected as proposer P/VP(v) times, i.e. with frequency: -In this case, we see that a number of rounds coordinated by a faulty process is proportional to its voting power. 
-We consider also the case where we have voting power uniformly distributed among processes, i.e., we have 10 processes -each with voting power of 1. And let consider that there are 3 faulty processes with consecutive addresses, -for example the first 3 processes are faulty. Then the sequence looks like this: + f(v) ~ VP(v) / P -`p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc` +where C is a tolerance factor for validator set changes with following values: +- C == 1 if there are no validator set changes +- C ~ k when there are validator changes -In this case, we have 3 consecutive rounds with a faulty proposer. -One special case we consider is the case where a single honest process p0 has most of the voting power, for example: -(p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selection looks like this: +*[this needs more work]* -p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc +### Basic Algorithm -This basically means that almost all rounds have the same proposer. But in this case, the process p0 has anyway enough -voting power to decide whatever he wants, so the fact that he coordinates almost all rounds seems correct. +At its core, the proposer selection procedure uses a weighted round-robin algorithm. + +A model that gives a good intuition on how/ why the selection algorithm works and it is fair is that of a priority queue. The validators move ahead in this queue according to their voting power (the higher the voting power the faster a validator moves towards the head of the queue). 
When the algorithm runs the following happens: +- all validators move "ahead" according to their powers: for each validator, increase the priority by the voting power +- first in the queue becomes the proposer: select the validator with highest priority +- move the proposer back in the queue: decrease the proposer's priority by the total voting power + +Notation: +- vset - the validator set +- n - the number of validators +- VP(i) - voting power of validator i +- A(i) - accumulated priority for validator i +- P - total voting power of set +- avg - average of all validator priorities +- prop - proposer + +Simple view at the Selection Algorithm: + +``` + def ProposerSelection (vset): + + // compute priorities and elect proposer + for each validator i in vset: + A(i) += VP(i) + prop = max(A) + A(prop) -= P +``` + +### Stable Set + +Consider the validator set: + +Validator | p1| p2 +----------|---|--- +VP | 1 | 3 + +Assuming no validator changes, the following table shows the proposer priority computation over a few runs. Four runs of the selection procedure are shown, starting with the 5th the same values are computed. +Each row shows the priority queue and the process place in it. The proposer is the closest to the head, the rightmost validator. As priorities are updated, the validators move right in the queue. The proposer moves left as its priority is reduced after election. + +|Priority Run | -2| -1| 0 | 1| 2 | 3 | 4 | 5 | Alg step +|--------------- |---|---|---- |---|---- |---|---|---|-------- +| | | |p1,p2| | | | | |Initialized to 0 +|run 1 | | | | p1| | p2| | |A(i)+=VP(i) +| | | p2| | p1| | | | |A(p2)-= P +|run 2 | | | | |p1,p2| | | |A(i)+=VP(i) +| | p1| | | | p2| | | |A(p1)-= P +|run 3 | | p1| | | | | | p2|A(i)+=VP(i) +| | | p1| | p2| | | | |A(p2)-= P +|run 4 | | | p1| | | | p2| |A(i)+=VP(i) +| | | |p1,p2| | | | | |A(p2)-= P + +It can be shown that: +- At the end of each run k+1 the sum of the priorities is the same as at end of run k. 
If a new set's priorities are initialized to 0 then the sum of priorities will be 0 at each run while there are no changes. +- The max distance between priorites is (n-1) * P. *[formal proof not finished]* + +### Validator Set Changes +Between proposer selection runs the validator set may change. Some changes have implications on the proposer election. + +#### Voting Power Change +Consider again the earlier example and assume that the voting power of p1 is changed to 4: + +Validator | p1| p2 +----------|---| --- +VP | 4 | 3 + +Let's also assume that before this change the proposer priorites were as shown in first row (last run). As it can be seen, the selection could run again, without changes, as before. + +|Priority Run| -2 | -1 | 0 | 1 | 2 | Comment +|--------------| ---|--- |------|--- |--- |-------- +| last run | | p2 | | p1 | |__update VP(p1)__ +| next run | | | | | p2 |A(i)+=VP(i) +| | p1 | | | | p2 |A(p1)-= P + +However, when a validator changes power from a high to a low value, some other validator remain far back in the queue for a long time. This scenario is considered again in the Proposer Priority Range section. + +As before: +- At the end of each run k+1 the sum of the priorities is the same as at run k. +- The max distance between priorites is (n-1) * P. + +#### Validator Removal +Consider a new example with set: + +Validator | p1 | p2 | p3 | +--------- |--- |--- |--- | +VP | 1 | 2 | 3 | + +Let's assume that after the last run the proposer priorities were as shown in first row with their sum being 0. After p2 is removed, at the end of next proposer selection run (penultimate row) the sum of priorities is -2 (minus the priority of the removed process). + +The procedure could continue without modifications. However, after a sufficiently large number of modifications in validator set, the priority values would migrate towards maximum or minimum allowed values causing truncations due to overflow detection. 
+For this reason, the selection procedure adds another __new step__ that centers the current priority values such that the priority sum remains close to 0. + +|Priority Run |-3 | -2 | -1 | 0 | 1 | 2 | 4 |Comment +|--------------- |--- | ---|--- |--- |--- |--- |---|-------- +| last run |p3 | | | | p1 | p2 | |__remove p2__ +| nextrun | | | | | | | | +| __new step__ | | p3 | | | | p1 | |A(i) -= avg, avg = -1 +| | | | | | p3 | p1 | |A(i)+=VP(i) +| | | | p1 | | p3 | | |A(p1)-= P + +The modified selection algorithm is: + + def ProposerSelection (vset): + + // center priorities around zero + avg = sum(A(i) for i in vset)/len(vset) + for each validator i in vset: + A(i) -= avg + + // compute priorities and elect proposer + for each validator i in vset: + A(i) += VP(i) + prop = max(A) + A(prop) -= P + +Observations: +- The sum of priorities is now close to 0. Due to integer division the sum is an integer in (-n, n), where n is the number of validators. + +#### New Validator +When a new validator is added, same problem as the one described for removal appears, the sum of priorities in the new set is not zero. This is fixed with the centering step introduced above. + +One other issue that needs to be addressed is the following. A validator V that has just been elected is moved to the end of the queue. If the validator set is large and/ or other validators have significantly higher power, V will have to wait many runs to be elected. If V removes and re-adds itself to the set, it would make a significant (albeit unfair) "jump" ahead in the queue. + +In order to prevent this, when a new validator is added, its initial priority is set to: + + A(V) = -1.125 * P + +where P is the total voting power of the set including V. + +Curent implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details. 
+
+If we consider the validator set where p3 has just been added:
+
+Validator | p1 | p2 | p3
+----------|--- |--- |---
+VP | 1 | 3 | 8
+
+then p3 will start with proposer priority:
+
+    A(p3) = -1.125 * (1 + 3 + 8) ~ -13
+
+Note that since the current computation uses integer division there is a penalty loss when the sum of the voting power is less than 8.
+
+In the next run, p3 will still be ahead in the queue, elected as proposer and moved back in the queue.
+
+|Priority Run |-13 | -9 | -5 | -2 | -1 | 0 | 1 | 2 | 5 | 6 | 7 |Alg step
+|---------------|--- |--- |--- |----|--- |--- |---|---|---|---|---|--------
+|last run | | | | p2 | | | | p1| | | |__add p3__
+| | p3 | | | p2 | | | | p1| | | |A(p3) = -13
+|next run | | p3 | | | | | | p2| | p1| |A(i) -= avg, avg = -4
+| | | | | | p3 | | | | p2| | p1|A(i)+=VP(i)
+| | | | p1 | | p3 | | | | p2| | |A(p1)-=P
+
+### Proposer Priority Range
+With the introduction of centering, some interesting cases occur. Low power validators that bind early in a set that includes high power validator(s) benefit from subsequent additions to the set. This is because these early validators run through more right shift operations during centering, operations that increase their priority.
+
+As an example, consider the set where p2 is added after p1, with priority -1.125 * 80k = -90k. After the selection procedure runs once:
+
+Validator | p1 | p2 | Comment
+----------|-----|---- |---
+VP | 80k | 10 |
+A | 0 |-90k | __added p2__
+A |-45k | 45k | __run selection__
+
+Then execute the following steps:
+
+1. Add a new validator p3:
+
+Validator | p1 | p2 | p3
+----------|-----|--- |----
+VP | 80k | 10 | 10
+
+2. Run selection once. The notation '..p'/'p..' means very small deviations compared to column priority.
+
+|Priority Run | -90k..| -60k | -45k | -15k| 0 | 45k | 75k | 155k | Comment
+|--------------|------ |----- |------- |---- |---|---- |----- |------- |---------
+| last run | p3 | | p2 | | | p1 | | | __added p3__
+| next run
+| *right_shift*| | p3 | | p2 | | | p1 | | A(i) -= avg,avg=-30k
+| | | ..p3| | ..p2| | | | p1 | A(i)+=VP(i)
+| | | ..p3| | ..p2| | | p1.. | | A(p1)-=P, P=80k+20
+
+
+3. Remove p1 and run selection once:
+
+Validator | p3 | p2 | Comment
+----------|----- |---- |--------
+VP | 10 | 10 |
+A |-60k |-15k |
+A |-22.5k|22.5k| __run selection__
+
+At this point, while the total voting power is 20, the distance between priorities is 45k. It will take 4500 runs for p3 to catch up with p2.
+
+In order to prevent these types of scenarios, the selection algorithm performs scaling of priorities such that the difference between min and max values is smaller than two times the total voting power.
+
+The modified selection algorithm is:
+
+    def ProposerSelection (vset):
+
+    // scale the priority values
+    diff = max(A)-min(A)
+    threshold = 2 * P
+    if diff > threshold:
+        scale = diff/threshold
+        for each validator i in vset:
+            A(i) = A(i)/scale
+
+    // center priorities around zero
+    avg = sum(A(i) for i in vset)/len(vset)
+    for each validator i in vset:
+        A(i) -= avg
+
+    // compute priorities and elect proposer
+    for each validator i in vset:
+        A(i) += VP(i)
+    prop = max(A)
+    A(prop) -= P
+
+Observations:
+- With this modification, the maximum distance between priorities becomes 2 * P.
+
+Note also that even during steady state the priority range may increase beyond 2 * P. The scaling introduced here helps to keep the range bounded.
+
+### Wrinkles
+
+#### Validator Power Overflow Conditions
+The validator voting power is a positive number stored as an int64. When a validator is added the `1.125 * P` computation must not overflow.
As a consequence the code handling validator updates (add and update) checks for overflow conditions making sure the total voting power is never larger than the largest int64 `MAX`, with the property that `1.125 * MAX` is still in the bounds of int64. A fatal error is returned when an overflow condition is detected.
+
+#### Proposer Priority Overflow/Underflow Handling
+The proposer priority is stored as an int64. The selection algorithm performs additions and subtractions to these values and in the case of overflows and underflows it limits the values to:
+
+    MaxInt64 = 1 << 63 - 1
+    MinInt64 = -1 << 63
+
+### Requirement Fulfillment Claims
+__[R1]__
+
+The proposer algorithm is deterministic, giving consistent results across executions with the same transactions and validator set modifications.
+[WIP - needs more detail]
+
+__[R2]__
+
+Given a set of processes with the total voting power P, during a sequence of elections of length P, the number of times any process is selected as proposer is equal to its voting power. The sequence of the P proposers then repeats. If we consider the validator set:
+
+Validator | p1| p2
+----------|---|---
+VP | 1 | 3
+
+With no other changes to the validator set, the current implementation of proposer selection generates the sequence:
+`p2, p1, p2, p2, p2, p1, p2, p2,...` or [`p2, p1, p2, p2`]*
+A sequence that starts with any circular permutation of the [`p2, p1, p2, p2`] sub-sequence would also provide the same degree of fairness. In fact these circular permutations show up in the sliding window (over the generated sequence) of size equal to the length of the sub-sequence.
+
+Assigning priorities to each validator based on the voting power and updating them at each run ensures the fairness of the proposer selection. In addition, every time a validator is elected as proposer its priority is decreased by the total voting power.
+ +Intuitively, a process v jumps ahead in the queue at most (max(A) - min(A))/VP(v) times until it reaches the head and is elected. The frequency is then: + + f(v) ~ VP(v)/(max(A)-min(A)) = 1/k * VP(v)/P + +For current implementation, this means v should be proposer at least VP(v) times out of k * P runs, with scaling factor k=2. From 60b2ae5f5a3e16625af1342e012462448d565394 Mon Sep 17 00:00:00 2001 From: needkane <604476380@qq.com> Date: Wed, 20 Mar 2019 08:00:53 +0800 Subject: [PATCH 30/41] crypto: delete unused code (#3426) --- crypto/doc.go | 3 --- crypto/example_test.go | 7 ------- crypto/hash.go | 8 -------- 3 files changed, 18 deletions(-) diff --git a/crypto/doc.go b/crypto/doc.go index 41b3f302..95ae0af1 100644 --- a/crypto/doc.go +++ b/crypto/doc.go @@ -37,9 +37,6 @@ // sum := crypto.Sha256([]byte("This is Tendermint")) // fmt.Printf("%x\n", sum) -// Ripemd160 -// sum := crypto.Ripemd160([]byte("This is consensus")) -// fmt.Printf("%x\n", sum) package crypto // TODO: Add more docs in here diff --git a/crypto/example_test.go b/crypto/example_test.go index 904e1c61..f1d0013d 100644 --- a/crypto/example_test.go +++ b/crypto/example_test.go @@ -26,10 +26,3 @@ func ExampleSha256() { // Output: // f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e } - -func ExampleRipemd160() { - sum := crypto.Ripemd160([]byte("This is Tendermint")) - fmt.Printf("%x\n", sum) - // Output: - // 051e22663e8f0fd2f2302f1210f954adff009005 -} diff --git a/crypto/hash.go b/crypto/hash.go index c1fb41f7..e1d22523 100644 --- a/crypto/hash.go +++ b/crypto/hash.go @@ -2,8 +2,6 @@ package crypto import ( "crypto/sha256" - - "golang.org/x/crypto/ripemd160" ) func Sha256(bytes []byte) []byte { @@ -11,9 +9,3 @@ func Sha256(bytes []byte) []byte { hasher.Write(bytes) return hasher.Sum(nil) } - -func Ripemd160(bytes []byte) []byte { - hasher := ripemd160.New() - hasher.Write(bytes) - return hasher.Sum(nil) -} From 7af4b5086af9268f7cc8b41f5a174ade675d8ab4 Mon Sep 17 00:00:00 2001 
From: Anton Kaliaev Date: Wed, 20 Mar 2019 03:10:54 +0300 Subject: [PATCH 31/41] Remove RepeatTimer and refactor Switch#Broadcast (#3429) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * p2p: refactor Switch#Broadcast func - call wg.Add only once - do not call peers.List twice! * bad for perfomance * peers list can change in between calls! Refs #3306 * p2p: use time.Ticker instead of RepeatTimer no need in RepeatTimer since we don't Reset them Refs #3306 * libs/common: remove RepeatTimer (also TimerMaker and Ticker interface) "ancient code that’s caused no end of trouble" Ethan I believe there's much simplier way to write a ticker than can be reset https://medium.com/@arpith/resetting-a-ticker-in-go-63858a2c17ec --- CHANGELOG_PENDING.md | 1 + libs/common/repeat_timer.go | 232 ------------------------------- libs/common/repeat_timer_test.go | 136 ------------------ p2p/conn/connection.go | 12 +- p2p/switch.go | 15 +- 5 files changed, 17 insertions(+), 379 deletions(-) delete mode 100644 libs/common/repeat_timer.go delete mode 100644 libs/common/repeat_timer_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 37ae3a51..3cbc63b7 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -9,6 +9,7 @@ * Apps * Go API +- [libs/common] Remove RepeatTimer (also TimerMaker and Ticker interface) * Blockchain Protocol diff --git a/libs/common/repeat_timer.go b/libs/common/repeat_timer.go deleted file mode 100644 index 5d049738..00000000 --- a/libs/common/repeat_timer.go +++ /dev/null @@ -1,232 +0,0 @@ -package common - -import ( - "sync" - "time" -) - -// Used by RepeatTimer the first time, -// and every time it's Reset() after Stop(). -type TickerMaker func(dur time.Duration) Ticker - -// Ticker is a basic ticker interface. -type Ticker interface { - - // Never changes, never closes. - Chan() <-chan time.Time - - // Stopping a stopped Ticker will panic. 
- Stop() -} - -//---------------------------------------- -// defaultTicker - -var _ Ticker = (*defaultTicker)(nil) - -type defaultTicker time.Ticker - -func defaultTickerMaker(dur time.Duration) Ticker { - ticker := time.NewTicker(dur) - return (*defaultTicker)(ticker) -} - -// Implements Ticker -func (t *defaultTicker) Chan() <-chan time.Time { - return t.C -} - -// Implements Ticker -func (t *defaultTicker) Stop() { - ((*time.Ticker)(t)).Stop() -} - -//---------------------------------------- -// LogicalTickerMaker - -// Construct a TickerMaker that always uses `source`. -// It's useful for simulating a deterministic clock. -func NewLogicalTickerMaker(source chan time.Time) TickerMaker { - return func(dur time.Duration) Ticker { - return newLogicalTicker(source, dur) - } -} - -type logicalTicker struct { - source <-chan time.Time - ch chan time.Time - quit chan struct{} -} - -func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker { - lt := &logicalTicker{ - source: source, - ch: make(chan time.Time), - quit: make(chan struct{}), - } - go lt.fireRoutine(interval) - return lt -} - -// We need a goroutine to read times from t.source -// and fire on t.Chan() when `interval` has passed. -func (t *logicalTicker) fireRoutine(interval time.Duration) { - source := t.source - - // Init `lasttime` - lasttime := time.Time{} - select { - case lasttime = <-source: - case <-t.quit: - return - } - // Init `lasttime` end - - for { - select { - case newtime := <-source: - elapsed := newtime.Sub(lasttime) - if interval <= elapsed { - // Block for determinism until the ticker is stopped. - select { - case t.ch <- newtime: - case <-t.quit: - return - } - // Reset timeleft. - // Don't try to "catch up" by sending more. 
- // "Ticker adjusts the intervals or drops ticks to make up for - // slow receivers" - https://golang.org/pkg/time/#Ticker - lasttime = newtime - } - case <-t.quit: - return // done - } - } -} - -// Implements Ticker -func (t *logicalTicker) Chan() <-chan time.Time { - return t.ch // immutable -} - -// Implements Ticker -func (t *logicalTicker) Stop() { - close(t.quit) // it *should* panic when stopped twice. -} - -//--------------------------------------------------------------------- - -/* - RepeatTimer repeatedly sends a struct{}{} to `.Chan()` after each `dur` - period. (It's good for keeping connections alive.) - A RepeatTimer must be stopped, or it will keep a goroutine alive. -*/ -type RepeatTimer struct { - name string - ch chan time.Time - tm TickerMaker - - mtx sync.Mutex - dur time.Duration - ticker Ticker - quit chan struct{} -} - -// NewRepeatTimer returns a RepeatTimer with a defaultTicker. -func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { - return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker) -} - -// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker -// maker. -func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer { - var t = &RepeatTimer{ - name: name, - ch: make(chan time.Time), - tm: tm, - dur: dur, - ticker: nil, - quit: nil, - } - t.reset() - return t -} - -// receive ticks on ch, send out on t.ch -func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { - for { - select { - case tick := <-ch: - select { - case t.ch <- tick: - case <-quit: - return - } - case <-quit: // NOTE: `t.quit` races. - return - } - } -} - -func (t *RepeatTimer) Chan() <-chan time.Time { - return t.ch -} - -func (t *RepeatTimer) Stop() { - t.mtx.Lock() - defer t.mtx.Unlock() - - t.stop() -} - -// Wait the duration again before firing. 
-func (t *RepeatTimer) Reset() { - t.mtx.Lock() - defer t.mtx.Unlock() - - t.reset() -} - -//---------------------------------------- -// Misc. - -// CONTRACT: (non-constructor) caller should hold t.mtx. -func (t *RepeatTimer) reset() { - if t.ticker != nil { - t.stop() - } - t.ticker = t.tm(t.dur) - t.quit = make(chan struct{}) - go t.fireRoutine(t.ticker.Chan(), t.quit) -} - -// CONTRACT: caller should hold t.mtx. -func (t *RepeatTimer) stop() { - if t.ticker == nil { - /* - Similar to the case of closing channels twice: - https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk - Stopping a RepeatTimer twice implies that you do - not know whether you are done or not. - If you're calling stop on a stopped RepeatTimer, - you probably have race conditions. - */ - panic("Tried to stop a stopped RepeatTimer") - } - t.ticker.Stop() - t.ticker = nil - /* - From https://golang.org/pkg/time/#Ticker: - "Stop the ticker to release associated resources" - "After Stop, no more ticks will be sent" - So we shouldn't have to do the below. - - select { - case <-t.ch: - // read off channel if there's anything there - default: - } - */ - close(t.quit) -} diff --git a/libs/common/repeat_timer_test.go b/libs/common/repeat_timer_test.go deleted file mode 100644 index f2a7b16c..00000000 --- a/libs/common/repeat_timer_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package common - -import ( - "sync" - "testing" - "time" - - "github.com/fortytw2/leaktest" - "github.com/stretchr/testify/assert" -) - -func TestDefaultTicker(t *testing.T) { - ticker := defaultTickerMaker(time.Millisecond * 10) - <-ticker.Chan() - ticker.Stop() -} - -func TestRepeatTimer(t *testing.T) { - - ch := make(chan time.Time, 100) - mtx := new(sync.Mutex) - - // tick() fires from start to end - // (exclusive) in milliseconds with incr. - // It locks on mtx, so subsequent calls - // run in series. 
- tick := func(startMs, endMs, incrMs time.Duration) { - mtx.Lock() - go func() { - for tMs := startMs; tMs < endMs; tMs += incrMs { - lt := time.Time{} - lt = lt.Add(tMs * time.Millisecond) - ch <- lt - } - mtx.Unlock() - }() - } - - // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". - tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { - - // Check against timesMs. - for _, timeMs := range timesMs { - tyme := <-rt.Chan() - sinceMs := tyme.Sub(time.Time{}) / time.Millisecond - assert.Equal(t, timeMs, int64(sinceMs)) - } - - // TODO detect number of running - // goroutines to ensure that - // no other times will fire. - // See https://github.com/tendermint/tendermint/libs/issues/120. - time.Sleep(time.Millisecond * 100) - done := true - select { - case <-rt.Chan(): - done = false - default: - } - assert.True(t, done) - } - - tm := NewLogicalTickerMaker(ch) - rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) - - /* NOTE: Useful for debugging deadlocks... - go func() { - time.Sleep(time.Second * 3) - trace := make([]byte, 102400) - count := runtime.Stack(trace, true) - fmt.Printf("Stack of %d bytes: %s\n", count, trace) - }() - */ - - tick(0, 1000, 10) - tock(t, rt, []int64{}) - tick(1000, 2000, 10) - tock(t, rt, []int64{1000}) - tick(2005, 5000, 10) - tock(t, rt, []int64{2005, 3005, 4005}) - tick(5001, 5999, 1) - // Read 5005 instead of 5001 because - // it's 1 second greater than 4005. - tock(t, rt, []int64{5005}) - tick(6000, 7005, 1) - tock(t, rt, []int64{6005}) - tick(7033, 8032, 1) - tock(t, rt, []int64{7033}) - - // After a reset, nothing happens - // until two ticks are received. - rt.Reset() - tock(t, rt, []int64{}) - tick(8040, 8041, 1) - tock(t, rt, []int64{}) - tick(9555, 9556, 1) - tock(t, rt, []int64{9555}) - - // After a stop, nothing more is sent. - rt.Stop() - tock(t, rt, []int64{}) - - // Another stop panics. 
- assert.Panics(t, func() { rt.Stop() }) -} - -func TestRepeatTimerReset(t *testing.T) { - // check that we are not leaking any go-routines - defer leaktest.Check(t)() - - timer := NewRepeatTimer("test", 20*time.Millisecond) - defer timer.Stop() - - // test we don't receive tick before duration ms. - select { - case <-timer.Chan(): - t.Fatal("did not expect to receive tick") - default: - } - - timer.Reset() - - // test we receive tick after Reset is called - select { - case <-timer.Chan(): - // all good - case <-time.After(40 * time.Millisecond): - t.Fatal("expected to receive tick after reset") - } - - // just random calls - for i := 0; i < 100; i++ { - time.Sleep(time.Duration(RandIntn(40)) * time.Millisecond) - timer.Reset() - } -} diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index c1e90ab7..e0ce062a 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -95,13 +95,13 @@ type MConnection struct { stopMtx sync.Mutex flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled. 
- pingTimer *cmn.RepeatTimer // send pings periodically + pingTimer *time.Ticker // send pings periodically // close conn if pong is not received in pongTimeout pongTimer *time.Timer pongTimeoutCh chan bool // true - timeout, false - peer sent pong - chStatsTimer *cmn.RepeatTimer // update channel stats periodically + chStatsTimer *time.Ticker // update channel stats periodically created time.Time // time of creation @@ -201,9 +201,9 @@ func (c *MConnection) OnStart() error { return err } c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle) - c.pingTimer = cmn.NewRepeatTimer("ping", c.config.PingInterval) + c.pingTimer = time.NewTicker(c.config.PingInterval) c.pongTimeoutCh = make(chan bool, 1) - c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateStats) + c.chStatsTimer = time.NewTicker(updateStats) c.quitSendRoutine = make(chan struct{}) c.doneSendRoutine = make(chan struct{}) go c.sendRoutine() @@ -401,11 +401,11 @@ FOR_LOOP: // NOTE: flushTimer.Set() must be called every time // something is written to .bufConnWriter. c.flush() - case <-c.chStatsTimer.Chan(): + case <-c.chStatsTimer.C: for _, channel := range c.channels { channel.updateStats() } - case <-c.pingTimer.Chan(): + case <-c.pingTimer.C: c.Logger.Debug("Send Ping") _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{}) if err != nil { diff --git a/p2p/switch.go b/p2p/switch.go index a07f70ce..9e04fe7c 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -234,21 +234,26 @@ func (sw *Switch) OnStop() { // // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. 
func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - successChan := make(chan bool, len(sw.peers.List())) sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes)) + + peers := sw.peers.List() var wg sync.WaitGroup - for _, peer := range sw.peers.List() { - wg.Add(1) - go func(peer Peer) { + wg.Add(len(peers)) + successChan := make(chan bool, len(peers)) + + for _, peer := range peers { + go func(p Peer) { defer wg.Done() - success := peer.Send(chID, msgBytes) + success := p.Send(chID, msgBytes) successChan <- success }(peer) } + go func() { wg.Wait() close(successChan) }() + return successChan } From 03085c2da23b179c4a51f59a03cb40aa4e85a613 Mon Sep 17 00:00:00 2001 From: zjubfd Date: Wed, 20 Mar 2019 08:18:18 +0800 Subject: [PATCH 32/41] rpc: client disable compression (#3430) --- rpc/lib/client/http_client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index 97b8dfe7..cfa26e89 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -74,7 +74,9 @@ func makeHTTPClient(remoteAddr string) (string, *http.Client) { protocol, address, dialer := makeHTTPDialer(remoteAddr) return protocol + "://" + address, &http.Client{ Transport: &http.Transport{ - Dial: dialer, + // Set to true to prevent GZIP-bomb DoS attacks + DisableCompression: true, + Dial: dialer, }, } } From 926127c774a2c9110c4284938411818918ffecac Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 20 Mar 2019 03:59:33 +0300 Subject: [PATCH 33/41] blockchain: update the maxHeight when a peer is removed (#3350) * blockchain: update the maxHeight when a peer is removed Refs #2699 * add a changelog entry * make linter pass --- CHANGELOG_PENDING.md | 2 ++ blockchain/pool.go | 52 ++++++++++++++++++++++++++++++++--------- blockchain/pool_test.go | 46 +++++++++++++++++++++++++++++++++--- 3 files changed, 86 insertions(+), 14 deletions(-) diff --git 
a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 3cbc63b7..de16fcc2 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -20,3 +20,5 @@ ### IMPROVEMENTS: ### BUG FIXES: + +- [blockchain] \#2699 update the maxHeight when a peer is removed diff --git a/blockchain/pool.go b/blockchain/pool.go index 2cb7dda9..c842c0d1 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -69,7 +69,7 @@ type BlockPool struct { height int64 // the lowest key in requesters. // peers peers map[p2p.ID]*bpPeer - maxPeerHeight int64 + maxPeerHeight int64 // the biggest reported height // atomic numPending int32 // number of requests pending assignment or block response @@ -78,6 +78,8 @@ type BlockPool struct { errorsCh chan<- peerError } +// NewBlockPool returns a new BlockPool with the height equal to start. Block +// requests and errors will be sent to requestsCh and errorsCh accordingly. func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { bp := &BlockPool{ peers: make(map[p2p.ID]*bpPeer), @@ -93,15 +95,15 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p return bp } +// OnStart implements cmn.Service by spawning requesters routine and recording +// pool's start time. func (pool *BlockPool) OnStart() error { go pool.makeRequestersRoutine() pool.startTime = time.Now() return nil } -func (pool *BlockPool) OnStop() {} - -// Run spawns requesters as needed. +// spawns requesters as needed func (pool *BlockPool) makeRequestersRoutine() { for { if !pool.IsRunning() { @@ -150,6 +152,8 @@ func (pool *BlockPool) removeTimedoutPeers() { } } +// GetStatus returns pool's height, numPending requests and the number of +// requesters. 
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -157,6 +161,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters) } +// IsCaughtUp returns true if this node is caught up, false - otherwise. // TODO: relax conditions, prevent abuse. func (pool *BlockPool) IsCaughtUp() bool { pool.mtx.Lock() @@ -170,8 +175,9 @@ func (pool *BlockPool) IsCaughtUp() bool { // Some conditions to determine if we're caught up. // Ensures we've either received a block or waited some amount of time, - // and that we're synced to the highest known height. Note we use maxPeerHeight - 1 - // because to sync block H requires block H+1 to verify the LastCommit. + // and that we're synced to the highest known height. + // Note we use maxPeerHeight - 1 because to sync block H requires block H+1 + // to verify the LastCommit. receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1) isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers @@ -260,14 +266,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int } } -// MaxPeerHeight returns the highest height reported by a peer. +// MaxPeerHeight returns the highest reported height. func (pool *BlockPool) MaxPeerHeight() int64 { pool.mtx.Lock() defer pool.mtx.Unlock() return pool.maxPeerHeight } -// Sets the peer's alleged blockchain height. +// SetPeerHeight sets the peer's alleged blockchain height. func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -286,6 +292,8 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) { } } +// RemovePeer removes the peer with peerID from the pool. 
If there's no peer +// with peerID, function is a no-op. func (pool *BlockPool) RemovePeer(peerID p2p.ID) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -299,10 +307,32 @@ func (pool *BlockPool) removePeer(peerID p2p.ID) { requester.redo(peerID) } } - if p, exist := pool.peers[peerID]; exist && p.timeout != nil { - p.timeout.Stop() + + peer, ok := pool.peers[peerID] + if ok { + if peer.timeout != nil { + peer.timeout.Stop() + } + + delete(pool.peers, peerID) + + // Find a new peer with the biggest height and update maxPeerHeight if the + // peer's height was the biggest. + if peer.height == pool.maxPeerHeight { + pool.updateMaxPeerHeight() + } } - delete(pool.peers, peerID) +} + +// If no peers are left, maxPeerHeight is set to 0. +func (pool *BlockPool) updateMaxPeerHeight() { + var max int64 + for _, peer := range pool.peers { + if peer.height > max { + max = peer.height + } + } + pool.maxPeerHeight = max } // Pick an available peer with at least the given minHeight. diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index 75a03f63..e24f6131 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -1,12 +1,15 @@ package blockchain import ( + "fmt" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -66,7 +69,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { return peers } -func TestBasic(t *testing.T) { +func TestBlockPoolBasic(t *testing.T) { start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) @@ -122,7 +125,7 @@ func TestBasic(t *testing.T) { } } -func TestTimeout(t *testing.T) { +func TestBlockPoolTimeout(t *testing.T) { start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) @@ -180,3 +183,40 @@ func 
TestTimeout(t *testing.T) { } } } + +func TestBlockPoolRemovePeer(t *testing.T) { + peers := make(testPeers, 10) + for i := 0; i < 10; i++ { + peerID := p2p.ID(fmt.Sprintf("%d", i+1)) + height := int64(i + 1) + peers[peerID] = testPeer{peerID, height, make(chan inputData)} + } + requestsCh := make(chan BlockRequest) + errorsCh := make(chan peerError) + + pool := NewBlockPool(1, requestsCh, errorsCh) + pool.SetLogger(log.TestingLogger()) + err := pool.Start() + require.NoError(t, err) + defer pool.Stop() + + // add peers + for peerID, peer := range peers { + pool.SetPeerHeight(peerID, peer.height) + } + assert.EqualValues(t, 10, pool.MaxPeerHeight()) + + // remove not-existing peer + assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) }) + + // remove peer with biggest height + pool.RemovePeer(p2p.ID("10")) + assert.EqualValues(t, 9, pool.MaxPeerHeight()) + + // remove all peers + for peerID := range peers { + pool.RemovePeer(peerID) + } + + assert.EqualValues(t, 0, pool.MaxPeerHeight()) +} From 81b9bdf40010c6a4e336133ec53fe6e4e6089911 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 20 Mar 2019 08:29:40 -0400 Subject: [PATCH 34/41] comments on validator ordering (#3452) * comments on validator ordering * NextValidatorsHash --- docs/spec/blockchain/blockchain.md | 2 ++ rpc/core/consensus.go | 2 ++ types/validator_set.go | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index 00cccfc2..60a07d42 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -332,6 +332,7 @@ block.ValidatorsHash == MerkleRoot(state.Validators) MerkleRoot of the current validator set that is committing the block. This can be used to validate the `LastCommit` included in the next block. +Note the validators are sorted by their address before computing the MerkleRoot. 
### NextValidatorsHash @@ -342,6 +343,7 @@ block.NextValidatorsHash == MerkleRoot(state.NextValidators) MerkleRoot of the next validator set that will be the validator set that commits the next block. This is included so that the current validator set gets a chance to sign the next validator sets Merkle root. +Note the validators are sorted by their address before computing the MerkleRoot. ### ConsensusHash diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index b8a91f10..3850999d 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -10,6 +10,8 @@ import ( // Get the validator set at the given block height. // If no height is provided, it will fetch the current validator set. +// Note the validators are sorted by their address - this is the canonical +// order for the validators in the set as used in computing their Merkle root. // // ```shell // curl 'localhost:26657/validators' diff --git a/types/validator_set.go b/types/validator_set.go index 3d31cf7d..36ce67f0 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -31,7 +31,8 @@ const ( // ValidatorSet represent a set of *Validator at a given height. // The validators can be fetched by address or index. // The index is in order of .Address, so the indices are fixed -// for all rounds of a given blockchain height. +// for all rounds of a given blockchain height - ie. the validators +// are sorted by their address. // On the other hand, the .ProposerPriority of each validator and // the designated .GetProposer() of a set changes every round, // upon calling .IncrementProposerPriority(). 
From 660bd4a53e0dd7642a473689c8686c3f83e3a0ca Mon Sep 17 00:00:00 2001 From: tracebundy <745403419@qq.com> Date: Wed, 20 Mar 2019 20:30:49 +0800 Subject: [PATCH 35/41] fix comment (#3454) --- libs/common/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/common/service.go b/libs/common/service.go index 96a5e632..21fb0df3 100644 --- a/libs/common/service.go +++ b/libs/common/service.go @@ -209,7 +209,7 @@ func (bs *BaseService) Wait() { <-bs.quit } -// String implements Servce by returning a string representation of the service. +// String implements Service by returning a string representation of the service. func (bs *BaseService) String() string { return bs.name } From 1d4afb179b9660bf13705c67b01e20838a4506eb Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 21 Mar 2019 11:05:39 +0100 Subject: [PATCH 36/41] replace PB2TM.ConsensusParams with a call to params#Update (#3448) Fixes #3444 --- consensus/replay.go | 2 +- types/protobuf.go | 33 --------------------------------- types/protobuf_test.go | 2 +- 3 files changed, 2 insertions(+), 35 deletions(-) diff --git a/consensus/replay.go b/consensus/replay.go index c8ab8a33..e47d4892 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -324,7 +324,7 @@ func (h *Handshaker) ReplayBlocks( } if res.ConsensusParams != nil { - state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams, state.ConsensusParams.Block.TimeIotaMs) + state.ConsensusParams = state.ConsensusParams.Update(res.ConsensusParams) } sm.SaveState(h.stateDB, state) } diff --git a/types/protobuf.go b/types/protobuf.go index e10b9186..c87e82c0 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -220,36 +220,3 @@ func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) } return tmVals, nil } - -// BlockParams.TimeIotaMs is not exposed to the application. Therefore a caller -// must provide it. 
-func (pb2tm) ConsensusParams(csp *abci.ConsensusParams, blockTimeIotaMs int64) ConsensusParams { - params := ConsensusParams{ - Block: BlockParams{}, - Evidence: EvidenceParams{}, - Validator: ValidatorParams{}, - } - - // we must defensively consider any structs may be nil - if csp.Block != nil { - params.Block = BlockParams{ - MaxBytes: csp.Block.MaxBytes, - MaxGas: csp.Block.MaxGas, - TimeIotaMs: blockTimeIotaMs, - } - } - - if csp.Evidence != nil { - params.Evidence = EvidenceParams{ - MaxAge: csp.Evidence.MaxAge, - } - } - - if csp.Validator != nil { - params.Validator = ValidatorParams{ - PubKeyTypes: csp.Validator.PubKeyTypes, - } - } - - return params -} diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 152c92d1..64caa3f4 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -64,7 +64,7 @@ func TestABCIValidators(t *testing.T) { func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() abciCP := TM2PB.ConsensusParams(cp) - cp2 := PB2TM.ConsensusParams(abciCP, cp.Block.TimeIotaMs) + cp2 := cp.Update(abciCP) assert.Equal(t, *cp, cp2) } From 85be2a554e7e7752bed0b9409ab153bf04e05e7b Mon Sep 17 00:00:00 2001 From: Thane Thomson Date: Fri, 22 Mar 2019 09:16:38 -0400 Subject: [PATCH 37/41] tools/tm-signer-harness: update height and round for test harness (#3466) In order to re-enable the test harness for the KMS (see tendermint/kms#227), we need some marginally more realistic proposals and votes. This is because the KMS does some additional sanity checks now to ensure the height and round are increasing over time. 
--- tools/tm-signer-harness/internal/test_harness.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 00548913..7fefdfb4 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -198,8 +198,8 @@ func (th *TestHarness) TestSignProposal() error { hash := tmhash.Sum([]byte("hash")) prop := &types.Proposal{ Type: types.ProposalType, - Height: 12345, - Round: 23456, + Height: 100, + Round: 0, POLRound: -1, BlockID: types.BlockID{ Hash: hash, @@ -240,8 +240,8 @@ func (th *TestHarness) TestSignVote() error { hash := tmhash.Sum([]byte("hash")) vote := &types.Vote{ Type: voteType, - Height: 12345, - Round: 23456, + Height: 101, + Round: 0, BlockID: types.BlockID{ Hash: hash, PartsHeader: types.PartSetHeader{ From 25a3c8b1724c9611d6edc175b1b0d079f5ee28c1 Mon Sep 17 00:00:00 2001 From: zjubfd Date: Sun, 24 Mar 2019 01:08:15 +0800 Subject: [PATCH 38/41] rpc: support tls rpc (#3469) Refs #3419 --- CHANGELOG_PENDING.md | 1 + config/config.go | 29 +++++++++++++++++++++++++++ config/toml.go | 11 ++++++++++ docs/tendermint-core/configuration.md | 11 ++++++++++ node/node.go | 23 +++++++++++++++------ 5 files changed, 69 insertions(+), 6 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index de16fcc2..7cf3ab4e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -16,6 +16,7 @@ * P2P Protocol ### FEATURES: +- [rpc] \#3419 Start HTTPS server if `rpc.tls_cert_file` and `rpc.tls_key_file` are provided in the config (@guagualvcha) ### IMPROVEMENTS: diff --git a/config/config.go b/config/config.go index 8342921a..3ac22adb 100644 --- a/config/config.go +++ b/config/config.go @@ -339,6 +339,20 @@ type RPCConfig struct { // global HTTP write timeout, which applies to all connections and endpoints. 
// See https://github.com/tendermint/tendermint/issues/3435 TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"` + + // The name of a file containing certificate that is used to create the HTTPS server. + // + // If the certificate is signed by a certificate authority, + // the certFile should be the concatenation of the server's certificate, any intermediates, + // and the CA's certificate. + // + // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. + TLSCertFile string `mapstructure:"tls_cert_file"` + + // The name of a file containing matching private key that is used to create the HTTPS server. + // + // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. + TLSKeyFile string `mapstructure:"tls_key_file"` } // DefaultRPCConfig returns a default configuration for the RPC server @@ -357,6 +371,9 @@ func DefaultRPCConfig() *RPCConfig { MaxSubscriptionClients: 100, MaxSubscriptionsPerClient: 5, TimeoutBroadcastTxCommit: 10 * time.Second, + + TLSCertFile: "", + TLSKeyFile: "", } } @@ -395,6 +412,18 @@ func (cfg *RPCConfig) IsCorsEnabled() bool { return len(cfg.CORSAllowedOrigins) != 0 } +func (cfg RPCConfig) KeyFile() string { + return rootify(filepath.Join(defaultConfigDir, cfg.TLSKeyFile), cfg.RootDir) +} + +func (cfg RPCConfig) CertFile() string { + return rootify(filepath.Join(defaultConfigDir, cfg.TLSCertFile), cfg.RootDir) +} + +func (cfg RPCConfig) IsTLSEnabled() bool { + return cfg.TLSCertFile != "" && cfg.TLSKeyFile != "" +} + //----------------------------------------------------------------------------- // P2PConfig diff --git a/config/toml.go b/config/toml.go index a0b651d9..978255ab 100644 --- a/config/toml.go +++ b/config/toml.go @@ -181,6 +181,17 @@ max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }} # See 
https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" +# The name of a file containing certificate that is used to create the HTTPS server. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. +tls_cert_file = "{{ .RPC.TLSCertFile }}" + +# The name of a file containing matching private key that is used to create the HTTPS server. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. +tls_key_file = "{{ .RPC.TLSKeyFile }}" + ##### peer to peer configuration options ##### [p2p] diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index aa275c7a..d19c272f 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -127,6 +127,17 @@ max_subscriptions_per_client = 5 # See https://github.com/tendermint/tendermint/issues/3435 timeout_broadcast_tx_commit = "10s" +# The name of a file containing certificate that is used to create the HTTPS server. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. +tls_cert_file = "" + +# The name of a file containing matching private key that is used to create the HTTPS server. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. 
+tls_key_file = "" + ##### peer to peer configuration options ##### [p2p] diff --git a/node/node.go b/node/node.go index 8f71fa31..3501b6a7 100644 --- a/node/node.go +++ b/node/node.go @@ -715,13 +715,24 @@ func (n *Node) startRPC() ([]net.Listener, error) { }) rootHandler = corsMiddleware.Handler(mux) } + if n.config.RPC.IsTLSEnabled() { + go rpcserver.StartHTTPAndTLSServer( + listener, + rootHandler, + n.config.RPC.CertFile(), + n.config.RPC.KeyFile(), + rpcLogger, + config, + ) + } else { + go rpcserver.StartHTTPServer( + listener, + rootHandler, + rpcLogger, + config, + ) + } - go rpcserver.StartHTTPServer( - listener, - rootHandler, - rpcLogger, - config, - ) listeners[i] = listener } From 6de7effb05581f9bea2e8af06e4e74a85c34bc5f Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Tue, 26 Mar 2019 01:27:29 -0700 Subject: [PATCH 39/41] mempool no gossip back (#2778) Closes #1798 This is done by making every mempool tx maintain a list of peers who its received the tx from. Instead of using the 20byte peer ID, it instead uses a local map from peerID to uint16 counter, so every peer adds 2 bytes. (Word aligned to probably make it 8 bytes) This also required resetting the callback function on every CheckTx. This likely has performance ramifications for instruction caching. The actual setting operation isn't costly with the removal of defers in this PR. 
* Make the mempool not gossip txs back to peers its received it from * Fix adversarial memleak * Don't break interface * Update changelog * Forgot to add a mtx * forgot a mutex * Update mempool/reactor.go Co-Authored-By: ValarDragon * Update mempool/mempool.go Co-Authored-By: ValarDragon * Use unknown peer ID Co-Authored-By: ValarDragon * fix compilation * use next wait chan logic when skipping * Minor fixes * Add TxInfo * Add reverse map * Make activeID's auto-reserve 0 * 0 -> UnknownPeerID Co-Authored-By: ValarDragon * Switch to making the normal case set a callback on the reqres object The recheck case is still done via the global callback, and stats are also set via global callback * fix merge conflict * Addres comments * Add cache tests * add cache tests * minor fixes * update metrics in reqResCb and reformat code * goimport -w mempool/reactor.go * mempool: update memTx senders I had to introduce txsMap for quick mempoolTx lookups. * change senders type from []uint16 to sync.Map Fixes DATA RACE: ``` Read at 0x00c0013fcd3a by goroutine 183: github.com/tendermint/tendermint/mempool.(*MempoolReactor).broadcastTxRoutine() /go/src/github.com/tendermint/tendermint/mempool/reactor.go:195 +0x3c7 Previous write at 0x00c0013fcd3a by D[2019-02-27|10:10:49.058] Read PacketMsg switch=3 peer=35bc1e3558c182927b31987eeff3feb3d58a0fc5@127.0.0.1 :46552 conn=MConn{pipe} packet="PacketMsg{30:2B06579D0A143EB78F3D3299DE8213A51D4E11FB05ACE4D6A14F T:1}" goroutine 190: github.com/tendermint/tendermint/mempool.(*Mempool).CheckTxWithInfo() /go/src/github.com/tendermint/tendermint/mempool/mempool.go:387 +0xdc1 github.com/tendermint/tendermint/mempool.(*MempoolReactor).Receive() /go/src/github.com/tendermint/tendermint/mempool/reactor.go:134 +0xb04 github.com/tendermint/tendermint/p2p.createMConnection.func1() /go/src/github.com/tendermint/tendermint/p2p/peer.go:374 +0x25b github.com/tendermint/tendermint/p2p/conn.(*MConnection).recvRoutine() 
/go/src/github.com/tendermint/tendermint/p2p/conn/connection.go:599 +0xcce Goroutine 183 (running) created at: D[2019-02-27|10:10:49.058] Send switch=2 peer=1efafad5443abeea4b7a8155218e4369525d987e@127.0.0.1:46193 channel=48 conn=MConn{pipe} m sgBytes=2B06579D0A146194480ADAE00C2836ED7125FEE65C1D9DD51049 github.com/tendermint/tendermint/mempool.(*MempoolReactor).AddPeer() /go/src/github.com/tendermint/tendermint/mempool/reactor.go:105 +0x1b1 github.com/tendermint/tendermint/p2p.(*Switch).startInitPeer() /go/src/github.com/tendermint/tendermint/p2p/switch.go:683 +0x13b github.com/tendermint/tendermint/p2p.(*Switch).addPeer() /go/src/github.com/tendermint/tendermint/p2p/switch.go:650 +0x585 github.com/tendermint/tendermint/p2p.(*Switch).addPeerWithConnection() /go/src/github.com/tendermint/tendermint/p2p/test_util.go:145 +0x939 github.com/tendermint/tendermint/p2p.Connect2Switches.func2() /go/src/github.com/tendermint/tendermint/p2p/test_util.go:109 +0x50 I[2019-02-27|10:10:49.058] Added good transaction validator=0 tx=43B4D1F0F03460BD262835C4AA560DB860CFBBE85BD02386D83DAC38C67B3AD7 res="&{CheckTx:gas_w anted:1 }" height=0 total=375 Goroutine 190 (running) created at: github.com/tendermint/tendermint/p2p/conn.(*MConnection).OnStart() /go/src/github.com/tendermint/tendermint/p2p/conn/connection.go:210 +0x313 github.com/tendermint/tendermint/libs/common.(*BaseService).Start() /go/src/github.com/tendermint/tendermint/libs/common/service.go:139 +0x4df github.com/tendermint/tendermint/p2p.(*peer).OnStart() /go/src/github.com/tendermint/tendermint/p2p/peer.go:179 +0x56 github.com/tendermint/tendermint/libs/common.(*BaseService).Start() /go/src/github.com/tendermint/tendermint/libs/common/service.go:139 +0x4df github.com/tendermint/tendermint/p2p.(*peer).Start() :1 +0x43 github.com/tendermint/tendermint/p2p.(*Switch).startInitPeer() ``` * explain the choice of a map DS for senders * extract ids pool/mapper to a separate struct * fix literal copies lock value from senders: 
sync.Map contains sync.Mutex * use sync.Map#LoadOrStore instead of Load * fixes after Ismail's review * rename resCbNormal to resCbFirstTime --- CHANGELOG_PENDING.md | 2 + docs/spec/reactors/mempool/reactor.md | 2 + mempool/bench_test.go | 13 +++ mempool/cache_test.go | 101 +++++++++++++++++ mempool/mempool.go | 153 ++++++++++++++++++++------ mempool/mempool_test.go | 42 ++----- mempool/reactor.go | 88 +++++++++++++-- mempool/reactor_test.go | 30 ++++- state/services.go | 17 ++- 9 files changed, 362 insertions(+), 86 deletions(-) create mode 100644 mempool/cache_test.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 7cf3ab4e..bebc3e6a 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -20,6 +20,8 @@ ### IMPROVEMENTS: +- [mempool] \#2778 No longer send txs back to peers who sent it to you + ### BUG FIXES: - [blockchain] \#2699 update the maxHeight when a peer is removed diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md index fa25eeb3..d0b19f7c 100644 --- a/docs/spec/reactors/mempool/reactor.md +++ b/docs/spec/reactors/mempool/reactor.md @@ -12,3 +12,5 @@ for details. Sending incorrectly encoded data or data exceeding `maxMsgSize` will result in stopping the peer. + +The mempool will not send a tx back to any peer which it received it from. 
\ No newline at end of file diff --git a/mempool/bench_test.go b/mempool/bench_test.go index 8936f8df..0cd394cd 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -26,6 +26,19 @@ func BenchmarkReap(b *testing.B) { } } +func BenchmarkCheckTx(b *testing.B) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool, cleanup := newMempoolWithApp(cc) + defer cleanup() + + for i := 0; i < b.N; i++ { + tx := make([]byte, 8) + binary.BigEndian.PutUint64(tx, uint64(i)) + mempool.CheckTx(tx, nil) + } +} + func BenchmarkCacheInsertTime(b *testing.B) { cache := newMapTxCache(b.N) txs := make([][]byte, b.N) diff --git a/mempool/cache_test.go b/mempool/cache_test.go new file mode 100644 index 00000000..26e560b6 --- /dev/null +++ b/mempool/cache_test.go @@ -0,0 +1,101 @@ +package mempool + +import ( + "crypto/rand" + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +func TestCacheRemove(t *testing.T) { + cache := newMapTxCache(100) + numTxs := 10 + txs := make([][]byte, numTxs) + for i := 0; i < numTxs; i++ { + // probability of collision is 2**-256 + txBytes := make([]byte, 32) + rand.Read(txBytes) + txs[i] = txBytes + cache.Push(txBytes) + // make sure its added to both the linked list and the map + require.Equal(t, i+1, len(cache.map_)) + require.Equal(t, i+1, cache.list.Len()) + } + for i := 0; i < numTxs; i++ { + cache.Remove(txs[i]) + // make sure its removed from both the map and the linked list + require.Equal(t, numTxs-(i+1), len(cache.map_)) + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } +} + +func TestCacheAfterUpdate(t *testing.T) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool, cleanup := newMempoolWithApp(cc) + defer cleanup() + + // reAddIndices & txsInCache can have elements > numTxsToCreate + 
// also assumes max index is 255 for convenience + // txs in cache also checks order of elements + tests := []struct { + numTxsToCreate int + updateIndices []int + reAddIndices []int + txsInCache []int + }{ + {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works + {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache + {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache + {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe + } + for tcIndex, tc := range tests { + for i := 0; i < tc.numTxsToCreate; i++ { + tx := types.Tx{byte(i)} + err := mempool.CheckTx(tx, nil) + require.NoError(t, err) + } + + updateTxs := []types.Tx{} + for _, v := range tc.updateIndices { + tx := types.Tx{byte(v)} + updateTxs = append(updateTxs, tx) + } + mempool.Update(int64(tcIndex), updateTxs, nil, nil) + + for _, v := range tc.reAddIndices { + tx := types.Tx{byte(v)} + _ = mempool.CheckTx(tx, nil) + } + + cache := mempool.cache.(*mapTxCache) + node := cache.list.Front() + counter := 0 + for node != nil { + require.NotEqual(t, len(tc.txsInCache), counter, + "cache larger than expected on testcase %d", tcIndex) + + nodeVal := node.Value.([sha256.Size]byte) + expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) + // Reference for reading the errors: + // >>> sha256('\x00').hexdigest() + // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' + // >>> sha256('\x01').hexdigest() + // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' + // >>> sha256('\x02').hexdigest() + // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' + + require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) + counter++ + node = node.Next() + } + require.Equal(t, len(tc.txsInCache), counter, + "cache smaller than expected on testcase %d", tcIndex) + mempool.Flush() + } +} diff --git a/mempool/mempool.go b/mempool/mempool.go 
index 41ee59cb..2064b7bc 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -31,6 +31,14 @@ type PreCheckFunc func(types.Tx) error // transaction doesn't require more gas than available for the block. type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +// TxInfo are parameters that get passed when attempting to add a tx to the +// mempool. +type TxInfo struct { + // We don't use p2p.ID here because it's too big. The gain is to store max 2 + // bytes with each tx to identify the sender rather than 20 bytes. + PeerID uint16 +} + /* The mempool pushes new txs onto the proxyAppConn. @@ -148,9 +156,12 @@ func TxID(tx []byte) string { type Mempool struct { config *cfg.MempoolConfig - proxyMtx sync.Mutex - proxyAppConn proxy.AppConnMempool - txs *clist.CList // concurrent linked-list of good txs + proxyMtx sync.Mutex + proxyAppConn proxy.AppConnMempool + txs *clist.CList // concurrent linked-list of good txs + // map for quick access to txs + // Used in CheckTx to record the tx sender. + txsMap map[[sha256.Size]byte]*clist.CElement height int64 // the last block Update()'d to rechecking int32 // for re-checking filtered txs on Update() recheckCursor *clist.CElement // next expected response @@ -161,7 +172,10 @@ type Mempool struct { postCheck PostCheckFunc // Atomic integers - txsBytes int64 // see TxsBytes + + // Used to check if the mempool size is bigger than the allowed limit. + // See TxsBytes + txsBytes int64 // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -189,6 +203,7 @@ func NewMempool( config: config, proxyAppConn: proxyAppConn, txs: clist.New(), + txsMap: make(map[[sha256.Size]byte]*clist.CElement), height: height, rechecking: 0, recheckCursor: nil, @@ -286,8 +301,8 @@ func (mem *Mempool) TxsBytes() int64 { return atomic.LoadInt64(&mem.txsBytes) } -// FlushAppConn flushes the mempool connection to ensure async resCb calls are -// done e.g. from CheckTx. 
+// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are +// done. E.g. from CheckTx. func (mem *Mempool) FlushAppConn() error { return mem.proxyAppConn.FlushSync() } @@ -304,6 +319,7 @@ func (mem *Mempool) Flush() { e.DetachPrev() } + mem.txsMap = make(map[[sha256.Size]byte]*clist.CElement) _ = atomic.SwapInt64(&mem.txsBytes, 0) } @@ -327,6 +343,13 @@ func (mem *Mempool) TxsWaitChan() <-chan struct{} { // It gets called from another goroutine. // CONTRACT: Either cb will get called, or err returned. func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { + return mem.CheckTxWithInfo(tx, cb, TxInfo{PeerID: UnknownPeerID}) +} + +// CheckTxWithInfo performs the same operation as CheckTx, but with extra meta data about the tx. +// Currently this metadata is the peer who sent it, +// used to prevent the tx from being gossiped back to them. +func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) { mem.proxyMtx.Lock() // use defer to unlock mutex because application (*local client*) might panic defer mem.proxyMtx.Unlock() @@ -357,6 +380,17 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { // CACHE if !mem.cache.Push(tx) { + // record the sender + e, ok := mem.txsMap[sha256.Sum256(tx)] + if ok { // tx may be in cache, but not in the mempool + memTx := e.Value.(*mempoolTx) + if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded { + // TODO: consider punishing peer for dups, + // its non-trivial since invalid txs can become valid, + // but they can spam the same tx with little cost to them atm. 
+ } + } + return ErrTxInCache } // END CACHE @@ -381,27 +415,77 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { } reqRes := mem.proxyAppConn.CheckTxAsync(tx) if cb != nil { - reqRes.SetCallback(cb) + composedCallback := func(res *abci.Response) { + mem.reqResCb(tx, txInfo.PeerID)(res) + cb(res) + } + reqRes.SetCallback(composedCallback) + } else { + reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID)) } return nil } -// ABCI callback function +// Global callback, which is called in the absence of the specific callback. +// +// In recheckTxs because no reqResCb (specific) callback is set, this callback +// will be called. func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { - mem.resCbNormal(req, res) - } else { - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) + return } + + mem.metrics.RecheckTimes.Add(1) + mem.resCbRecheck(req, res) + + // update metrics mem.metrics.Size.Set(float64(mem.Size())) } -func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { +// Specific callback, which allows us to incorporate local information, like +// the peer that sent us this tx, so we can avoid sending it back to the same +// peer. +// +// Used in CheckTxWithInfo to record PeerID who sent us the tx. 
+func (mem *Mempool) reqResCb(tx []byte, peerID uint16) func(res *abci.Response) { + return func(res *abci.Response) { + if mem.recheckCursor != nil { + return + } + + mem.resCbFirstTime(tx, peerID, res) + + // update metrics + mem.metrics.Size.Set(float64(mem.Size())) + } +} + +func (mem *Mempool) addTx(memTx *mempoolTx) { + e := mem.txs.PushBack(memTx) + mem.txsMap[sha256.Sum256(memTx.tx)] = e + atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) + mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) +} + +func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { + mem.txs.Remove(elem) + elem.DetachPrev() + delete(mem.txsMap, sha256.Sum256(tx)) + atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) + + if removeFromCache { + mem.cache.Remove(tx) + } +} + +// callback, which is called after the app checked the tx for the first time. +// +// The case where the app checks the tx for the second and subsequent times is +// handled by the resCbRecheck callback. +func (mem *Mempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx var postCheckErr error if mem.postCheck != nil { postCheckErr = mem.postCheck(tx, r.CheckTx) @@ -412,15 +496,14 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { gasWanted: r.CheckTx.GasWanted, tx: tx, } - mem.txs.PushBack(memTx) - atomic.AddInt64(&mem.txsBytes, int64(len(tx))) + memTx.senders.Store(peerID, true) + mem.addTx(memTx) mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "height", memTx.height, "total", mem.Size(), ) - mem.metrics.TxSizeBytes.Observe(float64(len(tx))) mem.notifyTxsAvailable() } else { // ignore bad transaction @@ -434,6 +517,10 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { } } +// callback, which is called after the app rechecked the tx. 
+// +// The case where the app checks the tx for the first time is handled by the +// resCbFirstTime callback. func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: @@ -454,12 +541,8 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { } else { // Tx became invalidated due to newly committed block. mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr) - mem.txs.Remove(mem.recheckCursor) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - mem.recheckCursor.DetachPrev() - - // remove from cache (it might be good later) - mem.cache.Remove(tx) + // NOTE: we remove tx from the cache because it might be good later + mem.removeTx(tx, mem.recheckCursor, true) } if mem.recheckCursor == mem.recheckEnd { mem.recheckCursor = nil @@ -627,12 +710,9 @@ func (mem *Mempool) removeTxs(txs types.Txs) []types.Tx { memTx := e.Value.(*mempoolTx) // Remove the tx if it's already in a block. if _, ok := txsMap[string(memTx.tx)]; ok { - // remove from clist - mem.txs.Remove(e) - atomic.AddInt64(&mem.txsBytes, int64(-len(memTx.tx))) - e.DetachPrev() - // NOTE: we don't remove committed txs from the cache. + mem.removeTx(memTx.tx, e, false) + continue } txsLeft = append(txsLeft, memTx.tx) @@ -650,7 +730,7 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { mem.recheckEnd = mem.txs.Back() // Push txs to proxyAppConn - // NOTE: resCb() may be called concurrently. + // NOTE: reqResCb may be called concurrently. 
for _, tx := range txs { mem.proxyAppConn.CheckTxAsync(tx) } @@ -663,6 +743,7 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { type mempoolTx struct { height int64 // height that this tx had been validated in gasWanted int64 // amount of gas this tx states it will require + senders sync.Map // ids of peers who've sent us this tx (as a map for quick lookups) tx types.Tx // } @@ -679,13 +760,13 @@ type txCache interface { Remove(tx types.Tx) } -// mapTxCache maintains a cache of transactions. This only stores -// the hash of the tx, due to memory concerns. +// mapTxCache maintains a LRU cache of transactions. This only stores the hash +// of the tx, due to memory concerns. type mapTxCache struct { mtx sync.Mutex size int map_ map[[sha256.Size]byte]*list.Element - list *list.List // to remove oldest tx when cache gets too big + list *list.List } var _ txCache = (*mapTxCache)(nil) @@ -707,8 +788,8 @@ func (cache *mapTxCache) Reset() { cache.mtx.Unlock() } -// Push adds the given tx to the cache and returns true. It returns false if tx -// is already in the cache. +// Push adds the given tx to the cache and returns true. It returns +// false if tx is already in the cache. 
func (cache *mapTxCache) Push(tx types.Tx) bool { cache.mtx.Lock() defer cache.mtx.Unlock() @@ -728,8 +809,8 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { cache.list.Remove(popped) } } - cache.list.PushBack(txHash) - cache.map_[txHash] = cache.list.Back() + e := cache.list.PushBack(txHash) + cache.map_[txHash] = e return true } diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 5928fbc5..dc7d595a 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -12,9 +12,10 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" @@ -63,8 +64,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } -func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs { +func checkTxs(t *testing.T, mempool *Mempool, count int, peerID uint16) types.Txs { txs := make(types.Txs, count) + txInfo := TxInfo{PeerID: peerID} for i := 0; i < count; i++ { txBytes := make([]byte, 20) txs[i] = txBytes @@ -72,7 +74,7 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs { if err != nil { t.Error(err) } - if err := mempool.CheckTx(txBytes, nil); err != nil { + if err := mempool.CheckTxWithInfo(txBytes, nil, txInfo); err != nil { // Skip invalid txs. // TestMempoolFilters will fail otherwise. It asserts a number of txs // returned. @@ -92,7 +94,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { defer cleanup() // Ensure gas calculation behaves as expected - checkTxs(t, mempool, 1) + checkTxs(t, mempool, 1, UnknownPeerID) tx0 := mempool.TxsFront().Value.(*mempoolTx) // assert that kv store has gas wanted = 1. 
require.Equal(t, app.CheckTx(tx0.tx).GasWanted, int64(1), "KVStore had a gas value neq to 1") @@ -126,7 +128,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 20000, 30, 20}, } for tcIndex, tt := range tests { - checkTxs(t, mempool, tt.numTxsToCreate) + checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", len(got), tt.expectedNumTxs, tcIndex) @@ -167,7 +169,7 @@ func TestMempoolFilters(t *testing.T) { } for tcIndex, tt := range tests { mempool.Update(1, emptyTxArr, tt.preFilter, tt.postFilter) - checkTxs(t, mempool, tt.numTxsToCreate) + checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) mempool.Flush() } @@ -198,7 +200,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch of txs, it should only fire once - txs := checkTxs(t, mempool, 100) + txs := checkTxs(t, mempool, 100, UnknownPeerID) ensureFire(t, mempool.TxsAvailable(), timeoutMS) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -213,7 +215,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mempool, 50) + moreTxs := checkTxs(t, mempool, 50, UnknownPeerID) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // now call update with all the txs. 
it should not fire as there are no txs left @@ -224,7 +226,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch more txs, it should only fire once - checkTxs(t, mempool, 100) + checkTxs(t, mempool, 100, UnknownPeerID) ensureFire(t, mempool.TxsAvailable(), timeoutMS) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) } @@ -340,28 +342,6 @@ func TestSerialReap(t *testing.T) { reapCheck(600) } -func TestCacheRemove(t *testing.T) { - cache := newMapTxCache(100) - numTxs := 10 - txs := make([][]byte, numTxs) - for i := 0; i < numTxs; i++ { - // probability of collision is 2**-256 - txBytes := make([]byte, 32) - rand.Read(txBytes) - txs[i] = txBytes - cache.Push(txBytes) - // make sure its added to both the linked list and the map - require.Equal(t, i+1, len(cache.map_)) - require.Equal(t, i+1, cache.list.Len()) - } - for i := 0; i < numTxs; i++ { - cache.Remove(txs[i]) - // make sure its removed from both the map and the linked list - require.Equal(t, numTxs-(i+1), len(cache.map_)) - require.Equal(t, numTxs-(i+1), cache.list.Len()) - } -} - func TestMempoolCloseWAL(t *testing.T) { // 1. Create the temporary directory for mempool and WAL testing. 
rootDir, err := ioutil.TempDir("", "mempool-test") diff --git a/mempool/reactor.go b/mempool/reactor.go index ff87f050..555f38b8 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -3,13 +3,14 @@ package mempool import ( "fmt" "reflect" + "sync" "time" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/clist" - "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -21,13 +22,70 @@ const ( maxTxSize = maxMsgSize - 8 // account for amino overhead of TxMessage peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount + + // UnknownPeerID is the peer ID to use when running CheckTx when there is + // no peer (e.g. RPC) + UnknownPeerID uint16 = 0 ) // MempoolReactor handles mempool tx broadcasting amongst peers. +// It maintains a map from peer ID to counter, to prevent gossiping txs to the +// peers you received it from. type MempoolReactor struct { p2p.BaseReactor config *cfg.MempoolConfig Mempool *Mempool + ids *mempoolIDs +} + +type mempoolIDs struct { + mtx sync.RWMutex + peerMap map[p2p.ID]uint16 + nextID uint16 // assumes that a node will never have over 65536 active peers + activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter +} + +// Reserve searches for the next unused ID and assignes it to the peer. +func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) { + ids.mtx.Lock() + defer ids.mtx.Unlock() + + curID := ids.nextPeerID() + ids.peerMap[peer.ID()] = curID + ids.activeIDs[curID] = struct{}{} +} + +// nextPeerID returns the next unused peer ID to use. +// This assumes that ids's mutex is already locked. 
+func (ids *mempoolIDs) nextPeerID() uint16 { + _, idExists := ids.activeIDs[ids.nextID] + for idExists { + ids.nextID++ + _, idExists = ids.activeIDs[ids.nextID] + } + curID := ids.nextID + ids.nextID++ + return curID +} + +// Reclaim returns the ID reserved for the peer back to unused pool. +func (ids *mempoolIDs) Reclaim(peer p2p.Peer) { + ids.mtx.Lock() + defer ids.mtx.Unlock() + + removedID, ok := ids.peerMap[peer.ID()] + if ok { + delete(ids.activeIDs, removedID) + delete(ids.peerMap, peer.ID()) + } +} + +// GetForPeer returns an ID reserved for the peer. +func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 { + ids.mtx.RLock() + defer ids.mtx.RUnlock() + + return ids.peerMap[peer.ID()] } // NewMempoolReactor returns a new MempoolReactor with the given config and mempool. @@ -35,6 +93,11 @@ func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReac memR := &MempoolReactor{ config: config, Mempool: mempool, + ids: &mempoolIDs{ + peerMap: make(map[p2p.ID]uint16), + activeIDs: map[uint16]struct{}{0: {}}, + nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx + }, } memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR) return memR @@ -68,11 +131,13 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor. // It starts a broadcast routine ensuring all txs are forwarded to the given peer. func (memR *MempoolReactor) AddPeer(peer p2p.Peer) { + memR.ids.ReserveForPeer(peer) go memR.broadcastTxRoutine(peer) } // RemovePeer implements Reactor. 
func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) { + memR.ids.Reclaim(peer) // broadcast routine checks if peer is gone and returns } @@ -89,7 +154,8 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { switch msg := msg.(type) { case *TxMessage: - err := memR.Mempool.CheckTx(msg.Tx, nil) + peerID := memR.ids.GetForPeer(src) + err := memR.Mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{PeerID: peerID}) if err != nil { memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err) } @@ -110,6 +176,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { return } + peerID := memR.ids.GetForPeer(peer) var next *clist.CElement for { // This happens because the CElement we were looking at got garbage @@ -146,12 +213,15 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { continue } - // send memTx - msg := &TxMessage{Tx: memTx.tx} - success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) - if !success { - time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) - continue + // ensure peer hasn't already sent us this tx + if _, ok := memTx.senders.Load(peerID); !ok { + // send memTx + msg := &TxMessage{Tx: memTx.tx} + success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) + if !success { + time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) + continue + } } select { diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 51d13018..f16f8447 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -7,15 +7,13 @@ import ( "time" "github.com/fortytw2/leaktest" + "github.com/go-kit/kit/log/term" "github.com/pkg/errors" "github.com/stretchr/testify/assert" - "github.com/go-kit/kit/log/term" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/libs/log" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" 
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" @@ -102,6 +100,12 @@ func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int wg.Done() } +// ensure no txs on reactor after some timeout +func ensureNoTxs(t *testing.T, reactor *MempoolReactor, timeout time.Duration) { + time.Sleep(timeout) // wait for the txs in all mempools + assert.Zero(t, reactor.Mempool.Size()) +} + const ( NUM_TXS = 1000 TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow @@ -124,10 +128,26 @@ func TestReactorBroadcastTxMessage(t *testing.T) { // send a bunch of txs to the first reactor's mempool // and wait for them all to be received in the others - txs := checkTxs(t, reactors[0].Mempool, NUM_TXS) + txs := checkTxs(t, reactors[0].Mempool, NUM_TXS, UnknownPeerID) waitForTxs(t, txs, reactors) } +func TestReactorNoBroadcastToSender(t *testing.T) { + config := cfg.TestConfig() + const N = 2 + reactors := makeAndConnectMempoolReactors(config, N) + defer func() { + for _, r := range reactors { + r.Stop() + } + }() + + // send a bunch of txs to the first reactor's mempool, claiming it came from peer + // ensure peer gets no txs + checkTxs(t, reactors[0].Mempool, NUM_TXS, 1) + ensureNoTxs(t, reactors[1], 100*time.Millisecond) +} + func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") diff --git a/state/services.go b/state/services.go index 02c3aa7d..07d12c5a 100644 --- a/state/services.go +++ b/state/services.go @@ -23,6 +23,7 @@ type Mempool interface { Size() int CheckTx(types.Tx, func(*abci.Response)) error + CheckTxWithInfo(types.Tx, func(*abci.Response), mempool.TxInfo) error ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs Update(int64, types.Txs, mempool.PreCheckFunc, mempool.PostCheckFunc) error Flush() @@ -37,11 +38,17 @@ type MockMempool struct{} var _ Mempool = MockMempool{} -func (MockMempool) Lock() {} -func (MockMempool) Unlock() {} 
-func (MockMempool) Size() int { return 0 } -func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error { return nil } -func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } +func (MockMempool) Lock() {} +func (MockMempool) Unlock() {} +func (MockMempool) Size() int { return 0 } +func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error { + return nil +} +func (MockMempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response), + _ mempool.TxInfo) error { + return nil +} +func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (MockMempool) Update( _ int64, _ types.Txs, From 1bb8e02a962a1ffd7ce4e7355f648b644105f649 Mon Sep 17 00:00:00 2001 From: HaoyangLiu Date: Tue, 26 Mar 2019 16:29:06 +0800 Subject: [PATCH 40/41] mempool: fix broadcastTxRoutine leak (#3478) Refs #3306, irisnet@fdbb676 I ran an irishub validator. After the validator node ran several days, I dump the whole goroutine stack. I found that there were hundreds of broadcastTxRoutine. However, the connected peer quantity was less than 30. So I belive that there must be broadcastTxRoutine leakage issue. According to my analysis, I think the root cause of this issue locate in below code: select { case <-next.NextWaitChan(): // see the start of the for loop for nil check next = next.Next() case <-peer.Quit(): return case <-memR.Quit(): return } As we know, if multiple paths are avaliable in the same time, then a random path will be selected. Suppose that next.NextWaitChan() and peer.Quit() are both avaliable, and next.NextWaitChan() is chosen. // send memTx msg := &TxMessage{Tx: memTx.tx} success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) if !success { time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) continue } Then next will be non-empty and the peer send operation won't be success. As a result, this go routine will be track into infinite loop and won't be released. 
My proposal is to check peer.Quit() and memR.Quit() in every loop no matter whether next is nil. --- mempool/reactor.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mempool/reactor.go b/mempool/reactor.go index 555f38b8..23fec270 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -179,6 +179,10 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { peerID := memR.ids.GetForPeer(peer) var next *clist.CElement for { + // In case of both next.NextWaitChan() and peer.Quit() are variable at the same time + if !memR.IsRunning() || !peer.IsRunning() { + return + } // This happens because the CElement we were looking at got garbage // collected (removed). That is, .NextWait() returned nil. Go ahead and // start from the beginning. From a4d9539544ba4377f16e797fea01090bc974e1b5 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 26 Mar 2019 09:44:49 +0100 Subject: [PATCH 41/41] rpc/client: include NetworkClient interface into Client interface (#3473) I think it's nice when the Client interface has all the methods. If someone does not need a particular method/set of methods, she can use individual interfaces (e.g. NetworkClient, MempoolClient) or write her own interface. 
technically breaking Fixes #3458 --- CHANGELOG_PENDING.md | 1 + rpc/client/httpclient.go | 6 +----- rpc/client/interface.go | 10 ++++------ rpc/client/localclient.go | 6 +----- rpc/client/mock/client.go | 12 ++++++++++++ 5 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bebc3e6a..eaf08928 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -10,6 +10,7 @@ * Go API - [libs/common] Remove RepeatTimer (also TimerMaker and Ticker interface) +- [rpc/client] \#3458 Include NetworkClient interface into Client interface * Blockchain Protocol diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index e982292e..55c7b4f1 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -52,11 +52,7 @@ func NewHTTP(remote, wsEndpoint string) *HTTP { } } -var ( - _ Client = (*HTTP)(nil) - _ NetworkClient = (*HTTP)(nil) - _ EventsClient = (*HTTP)(nil) -) +var _ Client = (*HTTP)(nil) func (c *HTTP) Status() (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 605d84ba..8f9ed937 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -72,17 +72,15 @@ type StatusClient interface { type Client interface { cmn.Service ABCIClient - SignClient - HistoryClient - StatusClient EventsClient + HistoryClient + NetworkClient + SignClient + StatusClient } // NetworkClient is general info about the network state. May not // be needed usually. -// -// Not included in the Client interface, but generally implemented -// by concrete implementations. 
type NetworkClient interface { NetInfo() (*ctypes.ResultNetInfo, error) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index 976c9892..d57ced31 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -58,11 +58,7 @@ func NewLocal(node *nm.Node) *Local { } } -var ( - _ Client = (*Local)(nil) - _ NetworkClient = (*Local)(nil) - _ EventsClient = (*Local)(nil) -) +var _ Client = (*Local)(nil) // SetLogger allows to set a logger on the client. func (c *Local) SetLogger(l log.Logger) { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 9c0eb75b..c2e19b6d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -108,6 +108,18 @@ func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { return core.NetInfo(&rpctypes.Context{}) } +func (c Client) ConsensusState() (*ctypes.ResultConsensusState, error) { + return core.ConsensusState(&rpctypes.Context{}) +} + +func (c Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + return core.DumpConsensusState(&rpctypes.Context{}) +} + +func (c Client) Health() (*ctypes.ResultHealth, error) { + return core.Health(&rpctypes.Context{}) +} + func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds) }