Compare commits

...

58 Commits

Author SHA1 Message Date
Christopher Goes
73de99ecab Set evidence.Type 2018-05-31 06:35:26 +02:00
Christopher Goes
2046b346a1 Merge pull request #1652 from tendermint/cwgoes/abci-evidence-type-constants
Constants for evidence types
2018-05-31 05:24:47 +02:00
Christopher Goes
c9514f077b DUPLICATE_VOTE evidence type 2018-05-31 05:24:15 +02:00
Christopher Goes
3bf9a7dc50 Validator public key, not address (somehow this was lost in cherry-pick) 2018-05-29 07:51:28 +02:00
Christopher Goes
53b0c67f75 Switch to tagged ABCI version 2018-05-29 05:49:33 +02:00
Christopher Goes
3b8c1ae119 Pin to an ABCI version 2018-05-29 04:59:18 +02:00
Christopher Goes
849ffaf43d Cherry-pick 2018-05-29 01:02:44 +02:00
Christopher Goes
058867669e Pass validator set to ExecCommitBlock, update testcases 2018-05-29 00:50:44 +02:00
Christopher Goes
923e0b02bf Retarget 2018-05-29 00:49:57 +02:00
Zach
ec34c8f9d2 docs: update ABCI output (#1635) 2018-05-28 22:06:02 +04:00
Anton Kaliaev
6004587347 expect all tags to be strings (#1498)
* expect all tags to be strings

Refs #1369

* port changes from https://github.com/tendermint/tmlibs/pull/204

Refs #1369
2018-05-28 14:37:11 +04:00
Zach
7f20eb5f8e generate RPC docs using Slate (#1612)
* generate RPC docs using Slate (#691)

* update changelog

* skip if branch not develop

* slate: only build if rpc/core has changes

* fetch develop to compare against

* slate: build on master only

* [rpc/core] use original repo, not fork in README
2018-05-25 15:59:24 +04:00
Anton Kaliaev
eeabb4c06b Merge pull request #1607 from tendermint/1600-wal-bug
[wal] small fixes in SearchEndHeight & replay logic
2018-05-25 15:41:57 +04:00
Anton Kaliaev
4da81aa0b7 commented out TestPEXReactorRunning 2018-05-25 15:11:32 +04:00
Anton Kaliaev
67068a34f2 log requesting addresses 2018-05-25 15:11:32 +04:00
Anton Kaliaev
2a0e9f93ce provide arg to error
BEFORE:

```
E[05-24|11:55:37.229] Dialing failed                               pex=0 addr=022ec801d79025caab3afbbf816d92ff8450d040@127.0.0.2:6593 err="Connect to self: <nil>" attempts=0
```

AFTER:

```
E[05-24|11:55:37.229] Dialing failed                               pex=0 addr=022ec801d79025caab3afbbf816d92ff8450d040@127.0.0.2:6593 err="Connect to self: 022ec801d79025caab3afbbf816d92ff8450d040@127.0.0.2:6593" attempts=0
```
2018-05-25 15:11:32 +04:00
Anton Kaliaev
708f35e5c1 do not look for height in older files if we've seen height - 1
Refs #1600
2018-05-25 15:11:15 +04:00
Anton Kaliaev
f3f5c7f472 we must only return io.EOF to progress to the next file in auto.Group
since we never write msg partially, if we've encountered io.EOF in the
middle of the msg, we must abort
2018-05-25 15:10:51 +04:00
Anton Kaliaev
68f6226bea data is corrupted, but this requires manual intervention
i.e., can't be skipped

and we should only return DataCorruptionError if we can skip a msg safely
2018-05-25 15:10:51 +04:00
Anton Kaliaev
118b86b1ef fix nil panic error
msg is nil and if we continue executing, we'll get nil exception at
`msg.Msg.(....)`
2018-05-25 15:10:51 +04:00
Anton Kaliaev
b9afcbe3a2 fix typo 2018-05-25 15:10:51 +04:00
Anton Kaliaev
a885af0826 Merge pull request #1574 from tendermint/847-separate-internal-pubsub
[pubsub] Prioritise internal subscribers (e.g. reactor) over external (e.g. RPC)
2018-05-24 21:09:56 +04:00
Ethan Buchman
3a947b0117 Merge pull request #1619 from tendermint/zach/cleaner-repo
clean up links & spec docs
2018-05-23 21:09:15 -04:00
Ethan Buchman
caf5afc084 Merge pull request #1520 from tendermint/bucky/p2p-same-ip
p2p: prevent connections from same ip
2018-05-23 20:57:17 -04:00
Zach Ramsay
2aa5285c66 fix from self-review 2018-05-23 10:08:57 -04:00
Zach Ramsay
b166831fb5 link to both consensus specs 2018-05-23 10:05:03 -04:00
Zach Ramsay
423fef1416 docs: use absolute links (#1617) 2018-05-23 10:01:32 -04:00
Zach Ramsay
b4d10b5b91 consensus: link to spec from readme (#1609) 2018-05-23 09:41:54 -04:00
Ethan Buchman
6f1bfb6280 Merge pull request #1616 from tendermint/zach/no-debora
remove debora scripts
2018-05-23 08:43:22 -04:00
Ethan Buchman
5e7177053c Merge pull request #1611 from tendermint/zach/dead-links
fix dead links & other doc updates
2018-05-23 08:42:25 -04:00
Zach Ramsay
a0201e7862 docker readme: update 2018-05-23 08:28:55 -04:00
Zach Ramsay
126ddca1a6 remove debora scripts (#1610) 2018-05-23 08:01:07 -04:00
Alexander Simmerl
186d38dd8a Use different loopback addresses for test switch 2018-05-23 02:36:48 +02:00
Alexander Simmerl
01fd102dba Incoporate review feedback 2018-05-23 01:56:03 +02:00
Alexander Simmerl
e11f3167ff Fix pex reactor test 2018-05-23 01:35:03 +02:00
Alexander Simmerl
7d98cfd3d6 Test duplicate IP guard in peer set 2018-05-23 01:24:27 +02:00
Alexander Simmerl
4848e88737 Fix persistent peer switch test 2018-05-23 00:24:40 +02:00
Zach Ramsay
60d7486de2 docs: fix dead links, closes #1608 2018-05-22 14:46:56 -04:00
Ethan Buchman
229c18f1bd Merge branch 'master' into develop 2018-05-21 16:13:11 -04:00
Alexander Simmerl
91b6d3f18c Do not set address for self error 2018-05-21 18:47:14 +02:00
Alexander Simmerl
20e9dd0737 Return fake IP even when there is no conn 2018-05-21 17:55:40 +02:00
Alexander Simmerl
7b02b5b66b Add RemoteIP to test implementation 2018-05-21 17:41:34 +02:00
Alexander Simmerl
0cd92a4948 Fix race in test suffix 2018-05-21 17:35:49 +02:00
Anton Kaliaev
a9d0adbdef update changelog 2018-05-21 10:56:33 +04:00
Anton Kaliaev
3485edf4f5 update test docker image Go version to 1.10 2018-05-21 10:51:47 +04:00
Anton Kaliaev
c6f612bfc3 subscribe before state emits NewRoundStep
I had to alter events package for that. Hope that's fine.
Refs #847
2018-05-21 10:51:47 +04:00
Anton Kaliaev
bb9aa85d22 copy events and pubsub packages from tmlibs
Refs #847
2018-05-21 10:51:47 +04:00
Anton Kaliaev
c4fef499b6 switch to events package 2018-05-21 10:50:55 +04:00
Anton Kaliaev
b77d5344fc rename methods for clarity 2018-05-21 10:50:55 +04:00
Anton Kaliaev
21f5f3faa7 use channels to send votes, ... from consensus state to reactor
Refs #847
2018-05-21 10:50:55 +04:00
Ethan Buchman
bf6527fc59 Merge pull request #1382 from EugeneChung/develop
remove Heap.Update() call when setting Proposer field
2018-05-20 19:32:13 -04:00
Ethan Buchman
383c255f35 dev version bump 2018-05-20 16:54:21 -04:00
Alexander Simmerl
d596ed1bc2 Let peerConn handle IPs in for tests 2018-05-18 16:27:57 +02:00
Alexander Simmerl
b698a9febc Remove double locking in HasIP 2018-05-16 19:21:12 +02:00
Alexander Simmerl
c5f45275ec Use remotePeer for test switch 2018-05-16 19:21:12 +02:00
Alexander Simmerl
77f09f5b5e Move to ne.IP 2018-05-16 19:21:12 +02:00
Ethan Buchman
1fe41be929 p2p: prevent connections from same ip 2018-05-16 19:21:12 +02:00
Eugene Chung
34f5d439ee remove Heap.Update() call when setting Proposer field
In for loop of IncrementAccum(), Heap.Update() call is unnecessary when i == times - 1.
2018-03-28 12:58:53 +09:00
91 changed files with 4250 additions and 588 deletions

View File

@@ -77,6 +77,22 @@ jobs:
paths:
- "bin/abci*"
build_slate:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v1-pkg-cache
- restore_cache:
key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: slate docs
command: |
set -ex
export PATH="$GOBIN:$PATH"
make build-slate
lint:
<<: *defaults
steps:
@@ -180,6 +196,9 @@ workflows:
test-suite:
jobs:
- setup_dependencies
- build_slate:
requires:
- setup_dependencies
- setup_abci:
requires:
- setup_dependencies

4
.gitignore vendored
View File

@@ -5,7 +5,6 @@
.DS_Store
build/*
rpc/test/.tendermint
.debora
.tendermint
remote_dump
.revision
@@ -13,7 +12,6 @@ vendor
.vagrant
test/p2p/data/
test/logs
.glide
coverage.txt
docs/_build
docs/tools
@@ -25,3 +23,5 @@ scripts/cutWALUntil/cutWALUntil
.idea/
*.iml
libs/pubsub/query/fuzz_test/output

View File

@@ -1,5 +1,24 @@
# Changelog
## 0.20.0
BREAKING:
- [libs/pubsub] TagMap#Get returns a string value
- [libs/pubsub] NewTagMap accepts a map of strings
## 0.19.6
FEATURES
- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
IMPROVEMENTS:
- [consensus] consensus reactor now receives events from a separate event bus,
which is not dependant on external RPC load
- [consensus/wal] do not look for height in older files if we've seen height - 1
## 0.19.5
*May 20th, 2018*

View File

@@ -17,7 +17,7 @@
# Quick reference
* **Where to get help:**
https://tendermint.com/community
https://cosmos.network/community
* **Where to file issues:**
https://github.com/tendermint/tendermint/issues
@@ -37,25 +37,29 @@ To get started developing applications, see the [application developers guide](h
## Start one instance of the Tendermint core with the `kvstore` app
A very simple example of a built-in app and Tendermint core in one container.
A quick example of a built-in app and Tendermint core in one container.
```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
```
## mintnet-kubernetes
# Local cluster
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/tools/tree/master/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
```
make build-linux
make build-docker-localnode
make localnet-start
```
Note that this will build and use a different image than the ones provided here.
# License
View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
# User Feedback
# Contributing
## Contributing
You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.
Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.

8
Gopkg.lock generated
View File

@@ -238,8 +238,8 @@
"server",
"types"
]
revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
version = "v0.10.3"
revision = "f9dce537281ffba5d1e047e6729429f7e5fb90c9"
version = "v0.11.0-rc0"
[[projects]]
branch = "master"
@@ -281,8 +281,6 @@
"flowrate",
"log",
"merkle",
"pubsub",
"pubsub/query",
"test"
]
revision = "cc5f287c4798ffe88c04d02df219ecb6932080fd"
@@ -384,6 +382,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "52a0dcbebdf8714612444914cfce59a3af8c47c4453a2d43c4ccc5ff1a91d8ea"
inputs-digest = "90dc14750c1499107a3e6728ae696f9977f56bee2855c2f1c0a14831a165cc0e"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -71,7 +71,7 @@
[[constraint]]
name = "github.com/tendermint/abci"
version = "~0.10.3"
version = "0.11.0-rc0"
[[constraint]]
name = "github.com/tendermint/go-crypto"

View File

@@ -226,8 +226,11 @@ sentry-stop:
@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
# meant for the CI, inspect script & adapt accordingly
build-slate:
bash scripts/slate.sh
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate

View File

@@ -1,6 +1,7 @@
package blockchain
import (
"net"
"testing"
cmn "github.com/tendermint/tmlibs/common"
@@ -204,3 +205,4 @@ func (tp *bcrTestPeer) IsOutbound() bool { return false }
func (tp *bcrTestPeer) IsPersistent() bool { return true }
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
func (tp *bcrTestPeer) Set(string, interface{}) {}
func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} }

View File

@@ -1,18 +1 @@
# The core consensus algorithm.
* state.go - The state machine as detailed in the whitepaper
* reactor.go - A reactor that connects the state machine to the gossip network
# Go-routine summary
The reactor runs 2 go-routines for each added peer: gossipDataRoutine and gossipVotesRoutine.
The consensus state runs two persistent go-routines: timeoutRoutine and receiveRoutine.
Go-routines are also started to trigger timeouts and to avoid blocking when the internalMsgQueue is really backed up.
# Replay/WAL
A write-ahead log is used to record all messages processed by the receiveRoutine,
which amounts to all inputs to the consensus state machine:
messages from peers, messages from ourselves, and timeouts.
They can be played back deterministically at startup or using the replay console.
See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information.

View File

@@ -27,7 +27,7 @@ func init() {
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
N := 4
logger := consensusLogger()
logger := consensusLogger().With("test", "byzantine")
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
// give the byzantine validator a normal ticker

View File

@@ -264,7 +264,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
// mock the evidence pool
evpool := types.MockEvidencePool{}
// Make ConsensusReactor
// Make ConsensusState
stateDB := dbm.NewMemDB()
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)

View File

@@ -1,7 +1,6 @@
package consensus
import (
"context"
"fmt"
"reflect"
"sync"
@@ -14,6 +13,7 @@ import (
"github.com/tendermint/tmlibs/log"
cstypes "github.com/tendermint/tendermint/consensus/types"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -43,7 +43,8 @@ type ConsensusReactor struct {
eventBus *types.EventBus
}
// NewConsensusReactor returns a new ConsensusReactor with the given consensusState.
// NewConsensusReactor returns a new ConsensusReactor with the given
// consensusState.
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
conR := &ConsensusReactor{
conS: consensusState,
@@ -53,17 +54,15 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
return conR
}
// OnStart implements BaseService.
// OnStart implements BaseService by subscribing to events, which later will be
// broadcasted to other peers and starting state if we're not in fast sync.
func (conR *ConsensusReactor) OnStart() error {
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
if err := conR.BaseReactor.OnStart(); err != nil {
return err
}
err := conR.startBroadcastRoutine()
if err != nil {
return err
}
conR.subscribeToBroadcastEvents()
if !conR.FastSync() {
err := conR.conS.Start()
@@ -75,9 +74,11 @@ func (conR *ConsensusReactor) OnStart() error {
return nil
}
// OnStop implements BaseService
// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *ConsensusReactor) OnStop() {
conR.BaseReactor.OnStop()
conR.unsubscribeFromBroadcastEvents()
conR.conS.Stop()
}
@@ -101,6 +102,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
err := conR.conS.Start()
if err != nil {
conR.Logger.Error("Error starting conS", "err", err)
return
}
}
@@ -345,77 +347,40 @@ func (conR *ConsensusReactor) FastSync() bool {
//--------------------------------------
// startBroadcastRoutine subscribes for new round steps, votes and proposal
// heartbeats using the event bus and starts a go routine to broadcasts events
// to peers upon receiving them.
func (conR *ConsensusReactor) startBroadcastRoutine() error {
// subscribeToBroadcastEvents subscribes for new round steps, votes and
// proposal heartbeats using internal pubsub defined on state to broadcast
// them to peers upon receiving.
func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
const subscriber = "consensus-reactor"
ctx := context.Background()
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
func(data tmevents.EventData) {
conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState))
})
// new round steps
stepsCh := make(chan interface{})
err := conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, stepsCh)
if err != nil {
return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryNewRoundStep)
}
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
func(data tmevents.EventData) {
conR.broadcastHasVoteMessage(data.(*types.Vote))
})
// votes
votesCh := make(chan interface{})
err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryVote, votesCh)
if err != nil {
return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryVote)
}
// proposal heartbeats
heartbeatsCh := make(chan interface{})
err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryProposalHeartbeat, heartbeatsCh)
if err != nil {
return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryProposalHeartbeat)
}
go func() {
var data interface{}
var ok bool
for {
select {
case data, ok = <-stepsCh:
if ok { // a receive from a closed channel returns the zero value immediately
edrs := data.(types.EventDataRoundState)
conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
}
case data, ok = <-votesCh:
if ok {
edv := data.(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote)
}
case data, ok = <-heartbeatsCh:
if ok {
edph := data.(types.EventDataProposalHeartbeat)
conR.broadcastProposalHeartbeatMessage(edph)
}
case <-conR.Quit():
conR.eventBus.UnsubscribeAll(ctx, subscriber)
return
}
if !ok {
conR.eventBus.UnsubscribeAll(ctx, subscriber)
return
}
}
}()
return nil
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat,
func(data tmevents.EventData) {
conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat))
})
}
func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.EventDataProposalHeartbeat) {
hb := heartbeat.Heartbeat
func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() {
const subscriber = "consensus-reactor"
conR.conS.evsw.RemoveListener(subscriber)
}
func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) {
conR.Logger.Debug("Broadcasting proposal heartbeat message",
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
msg := &ProposalHeartbeatMessage{hb}
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
}
func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) {
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))

View File

@@ -269,8 +269,8 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
if appBlockHeight == 0 {
validators := types.TM2PB.Validators(state.Validators)
req := abci.RequestInitChain{
Validators: validators,
AppStateBytes: h.appState,
Validators: validators,
GenesisBytes: h.appState,
}
_, err := proxyApp.Consensus().InitChainSync(req)
if err != nil {
@@ -365,7 +365,8 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
for i := appBlockHeight + 1; i <= finalBlock; i++ {
h.logger.Info("Applying block", "height", i)
block := h.store.LoadBlock(i)
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger)
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, new(types.ValidatorSet))
// TODO: Temporary, see above comment.
if err != nil {
return nil, err
}

View File

@@ -15,6 +15,7 @@ import (
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@@ -110,6 +111,10 @@ type ConsensusState struct {
// closed when we finish shutting down
done chan struct{}
// synchronous pubsub between consensus state and reactor.
// state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat
evsw tmevents.EventSwitch
}
// NewConsensusState returns a new ConsensusState.
@@ -126,6 +131,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
doWALCatchup: true,
wal: nilWAL{},
evpool: evpool,
evsw: tmevents.NewEventSwitch(),
}
// set function defaults (may be overwritten before calling Start)
cs.decideProposal = cs.defaultDecideProposal
@@ -227,6 +233,10 @@ func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
// OnStart implements cmn.Service.
// It loads the latest state via the WAL, and starts the timeout and receive routines.
func (cs *ConsensusState) OnStart() error {
if err := cs.evsw.Start(); err != nil {
return err
}
// we may set the WAL in testing before calling Start,
// so only OpenWAL if its still the nilWAL
if _, ok := cs.wal.(nilWAL); ok {
@@ -244,8 +254,7 @@ func (cs *ConsensusState) OnStart() error {
// NOTE: we will get a build up of garbage go routines
// firing on the tockChan until the receiveRoutine is started
// to deal with them (by that point, at most one will be valid)
err := cs.timeoutTicker.Start()
if err != nil {
if err := cs.timeoutTicker.Start(); err != nil {
return err
}
@@ -284,6 +293,8 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
func (cs *ConsensusState) OnStop() {
cs.BaseService.OnStop()
cs.evsw.Stop()
cs.timeoutTicker.Stop()
// Make BaseService.Wait() wait until cs.wal.Wait()
@@ -509,6 +520,7 @@ func (cs *ConsensusState) newStep() {
// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
if cs.eventBus != nil {
cs.eventBus.PublishEventNewRoundStep(rs)
cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
}
}
@@ -752,6 +764,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
}
cs.privValidator.SignHeartbeat(chainID, heartbeat)
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat)
counter++
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
}
@@ -1418,6 +1431,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
cs.evsw.FireEvent(types.EventVote, vote)
// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
@@ -1445,6 +1459,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
}
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
cs.evsw.FireEvent(types.EventVote, vote)
switch vote.Type {
case types.VoteTypePrevote:

View File

@@ -11,7 +11,7 @@ import (
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)
func init() {

View File

@@ -111,7 +111,7 @@ func (wal *baseWAL) OnStop() {
}
// Write is called in newStep and for each receive on the
// peerMsgQueue and the timoutTicker.
// peerMsgQueue and the timeoutTicker.
// NOTE: does not call fsync()
func (wal *baseWAL) Write(msg WALMessage) {
if wal == nil {
@@ -144,13 +144,14 @@ type WALSearchOptions struct {
IgnoreDataCorruptionErrors bool
}
// SearchForEndHeight searches for the EndHeightMessage with the height and
// returns an auto.GroupReader, whenever it was found or not and an error.
// SearchForEndHeight searches for the EndHeightMessage with the given height
// and returns an auto.GroupReader, whenever it was found or not and an error.
// Group reader will be nil if found equals false.
//
// CONTRACT: caller must close group reader.
func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
var msg *TimedWALMessage
lastHeightFound := int64(-1)
// NOTE: starting from the last file in the group because we're usually
// searching for the last height. See replay.go
@@ -166,17 +167,25 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
for {
msg, err = dec.Decode()
if err == io.EOF {
// OPTIMISATION: no need to look for height in older files if we've seen h < height
if lastHeightFound > 0 && lastHeightFound < height {
gr.Close()
return nil, false, nil
}
// check next file
break
}
if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
// do nothing
continue
} else if err != nil {
gr.Close()
return nil, false, err
}
if m, ok := msg.Msg.(EndHeightMessage); ok {
lastHeightFound = m.Height
if m.Height == height { // found
wal.Logger.Debug("Found", "height", height, "index", index)
return gr, true, nil
@@ -271,23 +280,17 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
b = make([]byte, 4)
_, err = dec.rd.Read(b)
if err == io.EOF {
return nil, err
}
if err != nil {
return nil, fmt.Errorf("failed to read length: %v", err)
}
length := binary.BigEndian.Uint32(b)
if length > maxMsgSizeBytes {
return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
}
data := make([]byte, length)
_, err = dec.rd.Read(data)
if err == io.EOF {
return nil, err
}
if err != nil {
return nil, fmt.Errorf("failed to read data: %v", err)
}

View File

@@ -183,6 +183,7 @@ Try running these commands:
> commit
-> code: OK
-> data.hex: 0x0000000000000000
> deliver_tx "abc"
-> code: OK
@@ -194,7 +195,7 @@ Try running these commands:
> commit
-> code: OK
-> data.hex: 0x49DFD15CCDACDEAE9728CB01FBB5E8688CA58B91
-> data.hex: 0x0200000000000000
> query "abc"
-> code: OK
@@ -208,7 +209,7 @@ Try running these commands:
> commit
-> code: OK
-> data.hex: 0x70102DB32280373FBF3F9F89DA2A20CE2CD62B0B
-> data.hex: 0x0400000000000000
> query "def"
-> code: OK
@@ -301,6 +302,7 @@ In another window, start the ``abci-cli console``:
> set_option serial on
-> code: OK
-> log: OK (SetOption doesn't return anything.)
> check_tx 0x00
-> code: OK

View File

@@ -14,9 +14,9 @@ please submit them to our [bug bounty](https://tendermint.com/security)!
### Data Structures
- [Encoding and Digests](./blockchain/encoding.md)
- [Blockchain](./blockchain/blockchain.md)
- [State](./blockchain/state.md)
- [Encoding and Digests](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md)
- [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md)
- [State](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md)
### Consensus Protocol
@@ -24,11 +24,11 @@ please submit them to our [bug bounty](https://tendermint.com/security)!
### P2P and Network Protocols
- [The Base P2P Layer](p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
- [Peer Exchange (PEX)](reactors/pex): gossip known peer addresses so peers can find each other
- [Block Sync](reactors/block_sync): gossip blocks so peers can catch up quickly
- [Consensus](reactors/consensus): gossip votes and block parts so new blocks can be committed
- [Mempool](reactors/mempool): gossip transactions so they get included in blocks
- [The Base P2P Layer](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
- [Peer Exchange (PEX)](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/pex): gossip known peer addresses so peers can find each other
- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly
- [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed
- [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks
- Evidence: TODO
### More

View File

@@ -162,7 +162,7 @@ We refer to certain globally available objects:
and `state` keeps track of the validator set, the consensus parameters
and other results from the application.
Elements of an object are accessed as expected,
ie. `block.Header`. See [here](state.md) for the definition of `state`.
ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`.
### Header

View File

@@ -2,7 +2,7 @@
## Amino
Tendermint uses the Protobuf3 derrivative [Amino]() for all data structures.
Tendermint uses the Protobuf3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures.
Think of Amino as an object-oriented Protobuf3 with native JSON support.
The goal of the Amino encoding protocol is to bring parity between application
logic objects and persistence objects.
@@ -51,8 +51,8 @@ Notice that when encoding byte-arrays, the length of the byte-array is appended
to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes>
<Length> <ByteArray>`
(NOTE: the remainder of this section on Public Key Cryptography can be generated
from [this script](./scripts/crypto.go))
NOTE: the remainder of this section on Public Key Cryptography can be generated
from [this script](https://github.com/tendermint/tendermint/blob/master/docs/spec/scripts/crypto.go)
### PubKeyEd25519
@@ -290,6 +290,7 @@ Amino also supports JSON encoding - registered types are simply encoded as:
"type": "<DisfixBytes>",
"value": <JSON>
}
```
For instance, an ED25519 PubKey would look like:

View File

@@ -77,5 +77,4 @@ func TotalVotingPower(vals []Validators) int64{
### ConsensusParams
TODO:
TODO

View File

@@ -58,7 +58,7 @@ message Validator {
```
The `pub_key` is the Amino encoded public key for the validator. For details on
Amino encoded public keys, see the [section of the encoding spec](./encoding.md#public-key-cryptography).
Amino encoded public keys, see the [section of the encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md#public-key-cryptography).
For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey
`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be
@@ -121,7 +121,6 @@ stateBlockHeight = height of the last block for which Tendermint completed all
block processing and saved all ABCI results to disk
appBlockHeight = height of the last block for which ABCI app succesfully
completely Commit
```
Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`
@@ -165,4 +164,3 @@ If `storeBlockHeight == stateBlockHeight+1`
If appBlockHeight == storeBlockHeight {
update the state using the saved ABCI responses but dont run the block against the real app.
This happens if we crashed after the app finished Commit but before Tendermint saved the state.

View File

@@ -12,7 +12,7 @@ Seeds should operate full nodes with the PEX reactor in a "crawler" mode
that continuously explores to validate the availability of peers.
Seeds should only respond with some top percentile of the best peers it knows about.
See [the peer-exchange docs](/docs/specification/new-spec/reactors/pex/pex.md)for details on peer quality.
See [the peer-exchange docs](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md)for details on peer quality.
## New Full Node

View File

@@ -2,7 +2,7 @@
This document explains how Tendermint Peers are identified and how they connect to one another.
For details on peer discovery, see the [peer exchange (PEX) reactor doc](/docs/specification/new-spec/reactors/pex/pex.md).
For details on peer discovery, see the [peer exchange (PEX) reactor doc](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md).
## Peer Identity

View File

@@ -16,7 +16,7 @@ explained in a forthcoming document.
For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the
block as the block size is big, i.e., they don't embed the block inside `Proposal` and
`VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in
[Blockchain](blockchain.md) section) that uniquely identifies each block. The block itself is
[Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section) that uniquely identifies each block. The block itself is
disseminated to validator processes using peer-to-peer gossiping protocol. It starts by having a
proposer first splitting a block into a number of block parts, that are then gossiped between
processes using `BlockPartMessage`.
@@ -69,7 +69,7 @@ BlockID contains PartSetHeader.
## VoteMessage
VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the
current round). Vote is defined in [Blockchain](blockchain.md) section and contains validator's
current round). Vote is defined in the [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section and contains validator's
information (validator address and index), height and round for which the vote is sent, vote type,
blockID if process vote for some block (`nil` otherwise) and a timestamp when the vote is sent. The
message is signed by the validator private key.

View File

@@ -44,4 +44,3 @@ p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc
This basically means that almost all rounds have the same proposer. But in this case, the process p0 has anyway enough
voting power to decide whatever he wants, so the fact that he coordinates almost all rounds seems correct.

View File

@@ -1,11 +0,0 @@
# Mempool Specification
This package contains documents specifying the functionality
of the mempool module.
Components:
* [Config](./config.md) - how to configure it
* [External Messages](./messages.md) - The messages we accept over p2p and rpc interfaces
* [Functionality](./functionality.md) - high-level description of the functionality it provides
* [Concurrency Model](./concurrency.md) - What guarantees we provide, what locks we require.

View File

@@ -32,6 +32,7 @@ wait before returning (sync makes sure CheckTx passes, commit
makes sure it was included in a signed block).
Request (`POST http://gaia.zone:46657/`):
```json
{
"id": "",
@@ -43,8 +44,8 @@ Request (`POST http://gaia.zone:46657/`):
}
```
Response:
```json
{
"error": "",

View File

@@ -117,7 +117,7 @@ current, past, and rate-of-change data to inform peer quality.
While a PID trust metric has been implemented, it remains for future work
to use it in the PEX.
See the [trustmetric](../../../architecture/adr-006-trust-metric.md )
and [trustmetric useage](../../../architecture/adr-007-trust-metric-usage.md )
See the [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
and [trustmetric useage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
architecture docs for more details.

View File

@@ -1 +1 @@
Spec moved to [docs/spec](/docs/spec).
Spec moved to [docs/spec](https://github.com/tendermint/tendermint/tree/master/docs/spec).

View File

@@ -1,190 +1,4 @@
RPC
===
Coming soon: RPC docs powered by `slate <https://github.com/lord/slate>`__. Until then, read on.
Tendermint supports the following RPC protocols:
- URI over HTTP
- JSONRPC over HTTP
- JSONRPC over websockets
Tendermint RPC is build using `our own RPC
library <https://github.com/tendermint/tendermint/tree/master/rpc/lib>`__.
Documentation and tests for that library could be found at
``tendermint/rpc/lib`` directory.
Configuration
~~~~~~~~~~~~~
Set the ``laddr`` config parameter under ``[rpc]`` table in the
$TMHOME/config/config.toml file or the ``--rpc.laddr`` command-line flag to the
desired protocol://host:port setting. Default: ``tcp://0.0.0.0:46657``.
Arguments
~~~~~~~~~
Arguments which expect strings or byte arrays may be passed as quoted
strings, like ``"abc"`` or as ``0x``-prefixed strings, like
``0x616263``.
URI/HTTP
~~~~~~~~
Example request:
.. code:: bash
curl -s 'http://localhost:46657/broadcast_tx_sync?tx="abc"' | jq .
Response:
.. code:: json
{
"error": "",
"result": {
"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
"log": "",
"data": "",
"code": 0
},
"id": "",
"jsonrpc": "2.0"
}
The first entry in the result-array (``96``) is the method this response
correlates with. ``96`` refers to "ResultTypeBroadcastTx", see
`responses.go <https://github.com/tendermint/tendermint/blob/master/rpc/core/types/responses.go>`__
for a complete overview.
JSONRPC/HTTP
~~~~~~~~~~~~
JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g.
``http://localhost:46657/``).
Example request:
.. code:: json
{
"method": "broadcast_tx_sync",
"jsonrpc": "2.0",
"params": [ "abc" ],
"id": "dontcare"
}
JSONRPC/websockets
~~~~~~~~~~~~~~~~~~
JSONRPC requests can be made via websocket. The websocket endpoint is at
``/websocket``, e.g. ``http://localhost:46657/websocket``. Asynchronous
RPC functions like event ``subscribe`` and ``unsubscribe`` are only
available via websockets.
Endpoints
~~~~~~~~~
An HTTP Get request to the root RPC endpoint (e.g.
``http://localhost:46657``) shows a list of available endpoints.
::
Available endpoints:
http://localhost:46657/abci_info
http://localhost:46657/dump_consensus_state
http://localhost:46657/genesis
http://localhost:46657/net_info
http://localhost:46657/num_unconfirmed_txs
http://localhost:46657/health
http://localhost:46657/status
http://localhost:46657/unconfirmed_txs
http://localhost:46657/unsafe_flush_mempool
http://localhost:46657/unsafe_stop_cpu_profiler
http://localhost:46657/validators
Endpoints that require arguments:
http://localhost:46657/abci_query?path=_&data=_&prove=_
http://localhost:46657/block?height=_
http://localhost:46657/blockchain?minHeight=_&maxHeight=_
http://localhost:46657/broadcast_tx_async?tx=_
http://localhost:46657/broadcast_tx_commit?tx=_
http://localhost:46657/broadcast_tx_sync?tx=_
http://localhost:46657/commit?height=_
http://localhost:46657/dial_seeds?seeds=_
http://localhost:46657/dial_peers?peers=_&persistent=_
http://localhost:46657/subscribe?event=_
http://localhost:46657/tx?hash=_&prove=_
http://localhost:46657/unsafe_start_cpu_profiler?filename=_
http://localhost:46657/unsafe_write_heap_profile?filename=_
http://localhost:46657/unsubscribe?event=_
tx
~~
Returns a transaction matching the given transaction hash.
**Parameters**
1. hash - the transaction hash
2. prove - include a proof of the transaction inclusion in the block in
the result (optional, default: false)
**Returns**
- ``proof``: the ``types.TxProof`` object
- ``tx``: ``[]byte`` - the transaction
- ``tx_result``: the ``abci.Result`` object
- ``index``: ``int`` - index of the transaction
- ``height``: ``int`` - height of the block where this transaction was
in
**Example**
.. code:: bash
curl -s 'http://localhost:46657/broadcast_tx_commit?tx="abc"' | jq .
# {
# "error": "",
# "result": {
# "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
# "log": "",
# "data": "",
# "code": 0
# },
# "id": "",
# "jsonrpc": "2.0"
# }
curl -s 'http://localhost:46657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF' | jq .
# {
# "error": "",
# "result": {
# "proof": {
# "Proof": {
# "aunts": []
# },
# "Data": "YWJjZA==",
# "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
# "Total": 1,
# "Index": 0
# },
# "tx": "YWJjZA==",
# "tx_result": {
# "log": "",
# "data": "",
# "code": 0
# },
# "index": 0,
# "height": 52
# },
# "id": "",
# "jsonrpc": "2.0"
# }
More Examples
~~~~~~~~~~~~~
See the various bash tests using curl in ``test/``, and examples using
the ``Go`` API in ``rpc/client/``.
The RPC documentation is hosted `here <https://tendermint.github.io/slate>`__ and is generated by the CI from our `Slate repo <https://github.com/tendermint/slate>`__. To update the documentation, edit the relevant ``godoc`` comments in the `rpc/core directory <https://github.com/tendermint/tendermint/tree/develop/rpc/core>`__.

9
libs/events/Makefile Normal file
View File

@@ -0,0 +1,9 @@
.PHONY: docs
REPO:=github.com/tendermint/tendermint/libs/events
docs:
@go get github.com/davecheney/godoc2md
godoc2md $(REPO) > README.md
test:
go test -v ./...

175
libs/events/README.md Normal file
View File

@@ -0,0 +1,175 @@
# events
`import "github.com/tendermint/tendermint/libs/events"`
* [Overview](#pkg-overview)
* [Index](#pkg-index)
## <a name="pkg-overview">Overview</a>
Pub-Sub in go with event caching
## <a name="pkg-index">Index</a>
* [type EventCache](#EventCache)
* [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache)
* [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent)
* [func (evc *EventCache) Flush()](#EventCache.Flush)
* [type EventCallback](#EventCallback)
* [type EventData](#EventData)
* [type EventSwitch](#EventSwitch)
* [func NewEventSwitch() EventSwitch](#NewEventSwitch)
* [type Eventable](#Eventable)
* [type Fireable](#Fireable)
#### <a name="pkg-files">Package files</a>
[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go)
## <a name="EventCache">type</a> [EventCache](/src/target/event_cache.go?s=116:179#L5)
``` go
type EventCache struct {
// contains filtered or unexported fields
}
```
An EventCache buffers events for a Fireable
All events are cached. Filtering happens on Flush
### <a name="NewEventCache">func</a> [NewEventCache](/src/target/event_cache.go?s=239:284#L11)
``` go
func NewEventCache(evsw Fireable) *EventCache
```
Create a new EventCache with an EventSwitch as backend
### <a name="EventCache.FireEvent">func</a> (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24)
``` go
func (evc *EventCache) FireEvent(event string, data EventData)
```
Cache an event to be fired upon finality.
### <a name="EventCache.Flush">func</a> (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31)
``` go
func (evc *EventCache) Flush()
```
Fire events by running evsw.FireEvent on all cached events. Blocks.
Clears cached events
## <a name="EventCallback">type</a> [EventCallback](/src/target/events.go?s=4201:4240#L185)
``` go
type EventCallback func(data EventData)
```
## <a name="EventData">type</a> [EventData](/src/target/events.go?s=243:294#L14)
``` go
type EventData interface {
}
```
Generic event data can be typed and registered with tendermint/go-amino
via concrete implementation of this interface
## <a name="EventSwitch">type</a> [EventSwitch](/src/target/events.go?s=560:771#L29)
``` go
type EventSwitch interface {
cmn.Service
Fireable
AddListenerForEvent(listenerID, event string, cb EventCallback)
RemoveListenerForEvent(event string, listenerID string)
RemoveListener(listenerID string)
}
```
### <a name="NewEventSwitch">func</a> [NewEventSwitch](/src/target/events.go?s=917:950#L46)
``` go
func NewEventSwitch() EventSwitch
```
## <a name="Eventable">type</a> [Eventable](/src/target/events.go?s=378:440#L20)
``` go
type Eventable interface {
SetEventSwitch(evsw EventSwitch)
}
```
reactors and other modules should export
this interface to become eventable
## <a name="Fireable">type</a> [Fireable](/src/target/events.go?s=490:558#L25)
``` go
type Fireable interface {
FireEvent(event string, data EventData)
}
```
an event switch or cache implements fireable
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

View File

@@ -0,0 +1,37 @@
package events
// An EventCache buffers events for a Fireable
// All events are cached. Filtering happens on Flush
type EventCache struct {
evsw Fireable
events []eventInfo
}
// Create a new EventCache with an EventSwitch as backend
func NewEventCache(evsw Fireable) *EventCache {
return &EventCache{
evsw: evsw,
}
}
// a cached event
type eventInfo struct {
event string
data EventData
}
// Cache an event to be fired upon finality.
func (evc *EventCache) FireEvent(event string, data EventData) {
// append to list (go will grow our backing array exponentially)
evc.events = append(evc.events, eventInfo{event, data})
}
// Fire events by running evsw.FireEvent on all cached events. Blocks.
// Clears cached events
func (evc *EventCache) Flush() {
for _, ei := range evc.events {
evc.evsw.FireEvent(ei.event, ei.data)
}
// Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation
evc.events = nil
}

View File

@@ -0,0 +1,35 @@
package events
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEventCache_Flush(t *testing.T) {
evsw := NewEventSwitch()
evsw.Start()
evsw.AddListenerForEvent("nothingness", "", func(data EventData) {
// Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache
require.FailNow(t, "We should never receive a message on this switch since none are fired")
})
evc := NewEventCache(evsw)
evc.Flush()
// Check after reset
evc.Flush()
fail := true
pass := false
evsw.AddListenerForEvent("somethingness", "something", func(data EventData) {
if fail {
require.FailNow(t, "Shouldn't see a message until flushed")
}
pass = true
})
evc.FireEvent("something", struct{ int }{1})
evc.FireEvent("something", struct{ int }{2})
evc.FireEvent("something", struct{ int }{3})
fail = false
evc.Flush()
assert.True(t, pass)
}

220
libs/events/events.go Normal file
View File

@@ -0,0 +1,220 @@
/*
Pub-Sub in go with event caching
*/
package events
import (
"sync"
cmn "github.com/tendermint/tmlibs/common"
)
// Generic event data can be typed and registered with tendermint/go-amino
// via concrete implementation of this interface
type EventData interface {
//AssertIsEventData()
}
// reactors and other modules should export
// this interface to become eventable
type Eventable interface {
SetEventSwitch(evsw EventSwitch)
}
// an event switch or cache implements fireable
type Fireable interface {
FireEvent(event string, data EventData)
}
type EventSwitch interface {
cmn.Service
Fireable
AddListenerForEvent(listenerID, event string, cb EventCallback)
RemoveListenerForEvent(event string, listenerID string)
RemoveListener(listenerID string)
}
type eventSwitch struct {
cmn.BaseService
mtx sync.RWMutex
eventCells map[string]*eventCell
listeners map[string]*eventListener
}
func NewEventSwitch() EventSwitch {
evsw := &eventSwitch{
eventCells: make(map[string]*eventCell),
listeners: make(map[string]*eventListener),
}
evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw)
return evsw
}
func (evsw *eventSwitch) OnStart() error {
return nil
}
func (evsw *eventSwitch) OnStop() {}
func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) {
// Get/Create eventCell and listener
evsw.mtx.Lock()
eventCell := evsw.eventCells[event]
if eventCell == nil {
eventCell = newEventCell()
evsw.eventCells[event] = eventCell
}
listener := evsw.listeners[listenerID]
if listener == nil {
listener = newEventListener(listenerID)
evsw.listeners[listenerID] = listener
}
evsw.mtx.Unlock()
// Add event and listener
eventCell.AddListener(listenerID, cb)
listener.AddEvent(event)
}
func (evsw *eventSwitch) RemoveListener(listenerID string) {
// Get and remove listener
evsw.mtx.RLock()
listener := evsw.listeners[listenerID]
evsw.mtx.RUnlock()
if listener == nil {
return
}
evsw.mtx.Lock()
delete(evsw.listeners, listenerID)
evsw.mtx.Unlock()
// Remove callback for each event.
listener.SetRemoved()
for _, event := range listener.GetEvents() {
evsw.RemoveListenerForEvent(event, listenerID)
}
}
func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) {
// Get eventCell
evsw.mtx.Lock()
eventCell := evsw.eventCells[event]
evsw.mtx.Unlock()
if eventCell == nil {
return
}
// Remove listenerID from eventCell
numListeners := eventCell.RemoveListener(listenerID)
// Maybe garbage collect eventCell.
if numListeners == 0 {
// Lock again and double check.
evsw.mtx.Lock() // OUTER LOCK
eventCell.mtx.Lock() // INNER LOCK
if len(eventCell.listeners) == 0 {
delete(evsw.eventCells, event)
}
eventCell.mtx.Unlock() // INNER LOCK
evsw.mtx.Unlock() // OUTER LOCK
}
}
func (evsw *eventSwitch) FireEvent(event string, data EventData) {
// Get the eventCell
evsw.mtx.RLock()
eventCell := evsw.eventCells[event]
evsw.mtx.RUnlock()
if eventCell == nil {
return
}
// Fire event for all listeners in eventCell
eventCell.FireEvent(data)
}
//-----------------------------------------------------------------------------
// eventCell handles keeping track of listener callbacks for a given event.
type eventCell struct {
mtx sync.RWMutex
listeners map[string]EventCallback
}
func newEventCell() *eventCell {
return &eventCell{
listeners: make(map[string]EventCallback),
}
}
func (cell *eventCell) AddListener(listenerID string, cb EventCallback) {
cell.mtx.Lock()
cell.listeners[listenerID] = cb
cell.mtx.Unlock()
}
func (cell *eventCell) RemoveListener(listenerID string) int {
cell.mtx.Lock()
delete(cell.listeners, listenerID)
numListeners := len(cell.listeners)
cell.mtx.Unlock()
return numListeners
}
func (cell *eventCell) FireEvent(data EventData) {
cell.mtx.RLock()
for _, listener := range cell.listeners {
listener(data)
}
cell.mtx.RUnlock()
}
//-----------------------------------------------------------------------------
type EventCallback func(data EventData)
type eventListener struct {
id string
mtx sync.RWMutex
removed bool
events []string
}
func newEventListener(id string) *eventListener {
return &eventListener{
id: id,
removed: false,
events: nil,
}
}
func (evl *eventListener) AddEvent(event string) {
evl.mtx.Lock()
defer evl.mtx.Unlock()
if evl.removed {
return
}
evl.events = append(evl.events, event)
}
func (evl *eventListener) GetEvents() []string {
evl.mtx.RLock()
defer evl.mtx.RUnlock()
events := make([]string, len(evl.events))
copy(events, evl.events)
return events
}
func (evl *eventListener) SetRemoved() {
evl.mtx.Lock()
defer evl.mtx.Unlock()
evl.removed = true
}

380
libs/events/events_test.go Normal file
View File

@@ -0,0 +1,380 @@
package events
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single
// listener to an event, and sends a string "data".
func TestAddListenerForEventFireOnce(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
messages := make(chan EventData)
evsw.AddListenerForEvent("listener", "event",
func(data EventData) {
messages <- data
})
go evsw.FireEvent("event", "data")
received := <-messages
if received != "data" {
t.Errorf("Message received does not match: %v", received)
}
}
// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single
// listener to an event, and sends a thousand integers.
func TestAddListenerForEventFireMany(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
doneSum := make(chan uint64)
doneSending := make(chan uint64)
numbers := make(chan uint64, 4)
// subscribe one listener for one event
evsw.AddListenerForEvent("listener", "event",
func(data EventData) {
numbers <- data.(uint64)
})
// collect received events
go sumReceivedNumbers(numbers, doneSum)
// go fire events
go fireEvents(evsw, "event", doneSending, uint64(1))
checkSum := <-doneSending
close(numbers)
eventSum := <-doneSum
if checkSum != eventSum {
t.Errorf("Not all messages sent were received.\n")
}
}
// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single
// listener to three different events and sends a thousand integers for each
// of the three events.
func TestAddListenerForDifferentEvents(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
doneSum := make(chan uint64)
doneSending1 := make(chan uint64)
doneSending2 := make(chan uint64)
doneSending3 := make(chan uint64)
numbers := make(chan uint64, 4)
// subscribe one listener to three events
evsw.AddListenerForEvent("listener", "event1",
func(data EventData) {
numbers <- data.(uint64)
})
evsw.AddListenerForEvent("listener", "event2",
func(data EventData) {
numbers <- data.(uint64)
})
evsw.AddListenerForEvent("listener", "event3",
func(data EventData) {
numbers <- data.(uint64)
})
// collect received events
go sumReceivedNumbers(numbers, doneSum)
// go fire events
go fireEvents(evsw, "event1", doneSending1, uint64(1))
go fireEvents(evsw, "event2", doneSending2, uint64(1))
go fireEvents(evsw, "event3", doneSending3, uint64(1))
var checkSum uint64 = 0
checkSum += <-doneSending1
checkSum += <-doneSending2
checkSum += <-doneSending3
close(numbers)
eventSum := <-doneSum
if checkSum != eventSum {
t.Errorf("Not all messages sent were received.\n")
}
}
// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch,
// subscribes a first listener to three events, and subscribes a second
// listener to two of those three events, and then sends a thousand integers
// for each of the three events.
func TestAddDifferentListenerForDifferentEvents(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
doneSum1 := make(chan uint64)
doneSum2 := make(chan uint64)
doneSending1 := make(chan uint64)
doneSending2 := make(chan uint64)
doneSending3 := make(chan uint64)
numbers1 := make(chan uint64, 4)
numbers2 := make(chan uint64, 4)
// subscribe two listener to three events
evsw.AddListenerForEvent("listener1", "event1",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener1", "event2",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener1", "event3",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener2", "event2",
func(data EventData) {
numbers2 <- data.(uint64)
})
evsw.AddListenerForEvent("listener2", "event3",
func(data EventData) {
numbers2 <- data.(uint64)
})
// collect received events for listener1
go sumReceivedNumbers(numbers1, doneSum1)
// collect received events for listener2
go sumReceivedNumbers(numbers2, doneSum2)
// go fire events
go fireEvents(evsw, "event1", doneSending1, uint64(1))
go fireEvents(evsw, "event2", doneSending2, uint64(1001))
go fireEvents(evsw, "event3", doneSending3, uint64(2001))
checkSumEvent1 := <-doneSending1
checkSumEvent2 := <-doneSending2
checkSumEvent3 := <-doneSending3
checkSum1 := checkSumEvent1 + checkSumEvent2 + checkSumEvent3
checkSum2 := checkSumEvent2 + checkSumEvent3
close(numbers1)
close(numbers2)
eventSum1 := <-doneSum1
eventSum2 := <-doneSum2
if checkSum1 != eventSum1 ||
checkSum2 != eventSum2 {
t.Errorf("Not all messages sent were received for different listeners to different events.\n")
}
}
// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to
// two events, fires a thousand integers for the first event, then unsubscribes
// the listener and fires a thousand integers for the second event.
func TestAddAndRemoveListener(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
doneSum1 := make(chan uint64)
doneSum2 := make(chan uint64)
doneSending1 := make(chan uint64)
doneSending2 := make(chan uint64)
numbers1 := make(chan uint64, 4)
numbers2 := make(chan uint64, 4)
// subscribe two listener to three events
evsw.AddListenerForEvent("listener", "event1",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener", "event2",
func(data EventData) {
numbers2 <- data.(uint64)
})
// collect received events for event1
go sumReceivedNumbers(numbers1, doneSum1)
// collect received events for event2
go sumReceivedNumbers(numbers2, doneSum2)
// go fire events
go fireEvents(evsw, "event1", doneSending1, uint64(1))
checkSumEvent1 := <-doneSending1
// after sending all event1, unsubscribe for all events
evsw.RemoveListener("listener")
go fireEvents(evsw, "event2", doneSending2, uint64(1001))
checkSumEvent2 := <-doneSending2
close(numbers1)
close(numbers2)
eventSum1 := <-doneSum1
eventSum2 := <-doneSum2
if checkSumEvent1 != eventSum1 ||
// correct value asserted by preceding tests, suffices to be non-zero
checkSumEvent2 == uint64(0) ||
eventSum2 != uint64(0) {
t.Errorf("Not all messages sent were received or unsubscription did not register.\n")
}
}
// TestRemoveListener does basic tests on adding and removing
func TestRemoveListener(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
count := 10
sum1, sum2 := 0, 0
// add some listeners and make sure they work
evsw.AddListenerForEvent("listener", "event1",
func(data EventData) {
sum1++
})
evsw.AddListenerForEvent("listener", "event2",
func(data EventData) {
sum2++
})
for i := 0; i < count; i++ {
evsw.FireEvent("event1", true)
evsw.FireEvent("event2", true)
}
assert.Equal(t, count, sum1)
assert.Equal(t, count, sum2)
// remove one by event and make sure it is gone
evsw.RemoveListenerForEvent("event2", "listener")
for i := 0; i < count; i++ {
evsw.FireEvent("event1", true)
evsw.FireEvent("event2", true)
}
assert.Equal(t, count*2, sum1)
assert.Equal(t, count, sum2)
// remove the listener entirely and make sure both gone
evsw.RemoveListener("listener")
for i := 0; i < count; i++ {
evsw.FireEvent("event1", true)
evsw.FireEvent("event2", true)
}
assert.Equal(t, count*2, sum1)
assert.Equal(t, count, sum2)
}
// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two
// listeners to three events, and fires a thousand integers for each event.
// These two listeners serve as the baseline validation while other listeners
// are randomly subscribed and unsubscribed.
// More precisely it randomly subscribes new listeners (different from the first
// two listeners) to one of these three events. At the same time it starts
// randomly unsubscribing these additional listeners from all events they are
// at that point subscribed to.
// NOTE: it is important to run this test with race conditions tracking on,
// `go test -race`, to examine for possible race conditions.
func TestRemoveListenersAsync(t *testing.T) {
evsw := NewEventSwitch()
err := evsw.Start()
if err != nil {
t.Errorf("Failed to start EventSwitch, error: %v", err)
}
doneSum1 := make(chan uint64)
doneSum2 := make(chan uint64)
doneSending1 := make(chan uint64)
doneSending2 := make(chan uint64)
doneSending3 := make(chan uint64)
numbers1 := make(chan uint64, 4)
numbers2 := make(chan uint64, 4)
// subscribe two listener to three events
evsw.AddListenerForEvent("listener1", "event1",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener1", "event2",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener1", "event3",
func(data EventData) {
numbers1 <- data.(uint64)
})
evsw.AddListenerForEvent("listener2", "event1",
func(data EventData) {
numbers2 <- data.(uint64)
})
evsw.AddListenerForEvent("listener2", "event2",
func(data EventData) {
numbers2 <- data.(uint64)
})
evsw.AddListenerForEvent("listener2", "event3",
func(data EventData) {
numbers2 <- data.(uint64)
})
// collect received events for event1
go sumReceivedNumbers(numbers1, doneSum1)
// collect received events for event2
go sumReceivedNumbers(numbers2, doneSum2)
addListenersStress := func() {
s1 := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(s1)
for k := uint16(0); k < 400; k++ {
listenerNumber := r1.Intn(100) + 3
eventNumber := r1.Intn(3) + 1
go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber),
fmt.Sprintf("event%v", eventNumber),
func(_ EventData) {})
}
}
removeListenersStress := func() {
s2 := rand.NewSource(time.Now().UnixNano())
r2 := rand.New(s2)
for k := uint16(0); k < 80; k++ {
listenerNumber := r2.Intn(100) + 3
go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber))
}
}
addListenersStress()
// go fire events
go fireEvents(evsw, "event1", doneSending1, uint64(1))
removeListenersStress()
go fireEvents(evsw, "event2", doneSending2, uint64(1001))
go fireEvents(evsw, "event3", doneSending3, uint64(2001))
checkSumEvent1 := <-doneSending1
checkSumEvent2 := <-doneSending2
checkSumEvent3 := <-doneSending3
checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3
close(numbers1)
close(numbers2)
eventSum1 := <-doneSum1
eventSum2 := <-doneSum2
if checkSum != eventSum1 ||
checkSum != eventSum2 {
t.Errorf("Not all messages sent were received.\n")
}
}
//------------------------------------------------------------------------------
// Helper functions
// sumReceivedNumbers takes two channels and adds all numbers received
// until the receiving channel `numbers` is closed; it then sends the sum
// on `doneSum` and closes that channel. Expected to be run in a go-routine.
func sumReceivedNumbers(numbers, doneSum chan uint64) {
var sum uint64 = 0
for {
j, more := <-numbers
sum += j
if !more {
doneSum <- sum
close(doneSum)
return
}
}
}
// fireEvents takes an EventSwitch and fires a thousand integers under
// a given `event` with the integers mootonically increasing from `offset`
// to `offset` + 999. It additionally returns the addition of all integers
// sent on `doneChan` for assertion that all events have been sent, and enabling
// the test to assert all events have also been received.
func fireEvents(evsw EventSwitch, event string, doneChan chan uint64,
offset uint64) {
var sentSum uint64 = 0
for i := offset; i <= offset+uint64(999); i++ {
sentSum += i
evsw.FireEvent(event, i)
}
doneChan <- sentSum
close(doneChan)
}

View File

@@ -0,0 +1,28 @@
package pubsub_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
func TestExample(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch := make(chan interface{}, 1)
err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]string{"abci.account.name": "John"}))
require.NoError(t, err)
assertReceive(t, "Tombstone", ch)
}

344
libs/pubsub/pubsub.go Normal file
View File

@@ -0,0 +1,344 @@
// Package pubsub implements a pub-sub model with a single publisher (Server)
// and multiple subscribers (clients).
//
// Though you can have multiple publishers by sharing a pointer to a server or
// by giving the same channel to each publisher and publishing messages from
// that channel (fan-in).
//
// Clients subscribe for messages, which could be of any type, using a query.
// When some message is published, we match it with all queries. If there is a
// match, this message will be pushed to all clients, subscribed to that query.
// See query subpackage for our implementation.
package pubsub
import (
"context"
"errors"
"sync"
cmn "github.com/tendermint/tmlibs/common"
)
type operation int
const (
sub operation = iota
pub
unsub
shutdown
)
var (
// ErrSubscriptionNotFound is returned when a client tries to unsubscribe
// from not existing subscription.
ErrSubscriptionNotFound = errors.New("subscription not found")
// ErrAlreadySubscribed is returned when a client tries to subscribe twice or
// more using the same query.
ErrAlreadySubscribed = errors.New("already subscribed")
)
type cmd struct {
op operation
query Query
ch chan<- interface{}
clientID string
msg interface{}
tags TagMap
}
// Query defines an interface for a query to be used for subscribing.
type Query interface {
Matches(tags TagMap) bool
String() string
}
// Server allows clients to subscribe/unsubscribe for messages, publishing
// messages with or without tags, and manages internal state.
type Server struct {
cmn.BaseService
cmds chan cmd
cmdsCap int
mtx sync.RWMutex
subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query
}
// Option sets a parameter for the server.
type Option func(*Server)
// TagMap is used to associate tags to a message.
// They can be queried by subscribers to choose messages they will received.
type TagMap interface {
// Get returns the value for a key, or nil if no value is present.
// The ok result indicates whether value was found in the tags.
Get(key string) (value string, ok bool)
// Len returns the number of tags.
Len() int
}
type tagMap map[string]string
var _ TagMap = (*tagMap)(nil)
// NewTagMap constructs a new immutable tag set from a map.
func NewTagMap(data map[string]string) TagMap {
return tagMap(data)
}
// Get returns the value for a key, or nil if no value is present.
// The ok result indicates whether value was found in the tags.
func (ts tagMap) Get(key string) (value string, ok bool) {
value, ok = ts[key]
return
}
// Len returns the number of tags.
func (ts tagMap) Len() int {
return len(ts)
}
// NewServer returns a new server. See the commentary on the Option functions
// for a detailed description of how to configure buffering. If no options are
// provided, the resulting server's queue is unbuffered.
func NewServer(options ...Option) *Server {
s := &Server{
subscriptions: make(map[string]map[string]Query),
}
s.BaseService = *cmn.NewBaseService(nil, "PubSub", s)
for _, option := range options {
option(s)
}
// if BufferCapacity option was not set, the channel is unbuffered
s.cmds = make(chan cmd, s.cmdsCap)
return s
}
// BufferCapacity allows you to specify capacity for the internal server's
// queue. Since the server, given Y subscribers, could only process X messages,
// this option could be used to survive spikes (e.g. high amount of
// transactions during peak hours).
func BufferCapacity(cap int) Option {
return func(s *Server) {
if cap > 0 {
s.cmdsCap = cap
}
}
}
// BufferCapacity returns capacity of the internal server's queue.
func (s *Server) BufferCapacity() int {
return s.cmdsCap
}
// Subscribe creates a subscription for the given client. It accepts a channel
// on which messages matching the given query can be received. An error will be
// returned to the caller if the context is canceled or if subscription already
// exist for pair clientID and query.
func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error {
s.mtx.RLock()
clientSubscriptions, ok := s.subscriptions[clientID]
if ok {
_, ok = clientSubscriptions[query.String()]
}
s.mtx.RUnlock()
if ok {
return ErrAlreadySubscribed
}
select {
case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}:
s.mtx.Lock()
if _, ok = s.subscriptions[clientID]; !ok {
s.subscriptions[clientID] = make(map[string]Query)
}
s.subscriptions[clientID][query.String()] = query
s.mtx.Unlock()
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// Unsubscribe removes the subscription on the given query. An error will be
// returned to the caller if the context is canceled or if subscription does
// not exist.
func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error {
var origQuery Query
s.mtx.RLock()
clientSubscriptions, ok := s.subscriptions[clientID]
if ok {
origQuery, ok = clientSubscriptions[query.String()]
}
s.mtx.RUnlock()
if !ok {
return ErrSubscriptionNotFound
}
// original query is used here because we're using pointers as map keys
select {
case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}:
s.mtx.Lock()
delete(clientSubscriptions, query.String())
s.mtx.Unlock()
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// UnsubscribeAll removes all client subscriptions. An error will be returned
// to the caller if the context is canceled or if subscription does not exist.
func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
s.mtx.RLock()
_, ok := s.subscriptions[clientID]
s.mtx.RUnlock()
if !ok {
return ErrSubscriptionNotFound
}
select {
case s.cmds <- cmd{op: unsub, clientID: clientID}:
s.mtx.Lock()
delete(s.subscriptions, clientID)
s.mtx.Unlock()
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// Publish publishes the given message. An error will be returned to the caller
// if the context is canceled.
func (s *Server) Publish(ctx context.Context, msg interface{}) error {
return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]string)))
}
// PublishWithTags publishes the given message with the set of tags. The set is
// matched with clients queries. If there is a match, the message is sent to
// the client.
func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error {
select {
case s.cmds <- cmd{op: pub, msg: msg, tags: tags}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// OnStop implements Service.OnStop by shutting down the server.
func (s *Server) OnStop() {
s.cmds <- cmd{op: shutdown}
}
// NOTE: not goroutine safe
type state struct {
// query -> client -> ch
queries map[Query]map[string]chan<- interface{}
// client -> query -> struct{}
clients map[string]map[Query]struct{}
}
// OnStart implements Service.OnStart by starting the server.
func (s *Server) OnStart() error {
go s.loop(state{
queries: make(map[Query]map[string]chan<- interface{}),
clients: make(map[string]map[Query]struct{}),
})
return nil
}
// OnReset implements Service.OnReset
func (s *Server) OnReset() error {
return nil
}
func (s *Server) loop(state state) {
loop:
for cmd := range s.cmds {
switch cmd.op {
case unsub:
if cmd.query != nil {
state.remove(cmd.clientID, cmd.query)
} else {
state.removeAll(cmd.clientID)
}
case shutdown:
for clientID := range state.clients {
state.removeAll(clientID)
}
break loop
case sub:
state.add(cmd.clientID, cmd.query, cmd.ch)
case pub:
state.send(cmd.msg, cmd.tags)
}
}
}
func (state *state) add(clientID string, q Query, ch chan<- interface{}) {
// add query if needed
if _, ok := state.queries[q]; !ok {
state.queries[q] = make(map[string]chan<- interface{})
}
// create subscription
state.queries[q][clientID] = ch
// add client if needed
if _, ok := state.clients[clientID]; !ok {
state.clients[clientID] = make(map[Query]struct{})
}
state.clients[clientID][q] = struct{}{}
}
func (state *state) remove(clientID string, q Query) {
clientToChannelMap, ok := state.queries[q]
if !ok {
return
}
ch, ok := clientToChannelMap[clientID]
if ok {
close(ch)
delete(state.clients[clientID], q)
// if it not subscribed to anything else, remove the client
if len(state.clients[clientID]) == 0 {
delete(state.clients, clientID)
}
delete(state.queries[q], clientID)
}
}
func (state *state) removeAll(clientID string) {
queryMap, ok := state.clients[clientID]
if !ok {
return
}
for q := range queryMap {
ch := state.queries[q][clientID]
close(ch)
delete(state.queries[q], clientID)
}
delete(state.clients, clientID)
}
func (state *state) send(msg interface{}, tags TagMap) {
for q, clientToChannelMap := range state.queries {
if q.Matches(tags) {
for _, ch := range clientToChannelMap {
ch <- msg
}
}
}
}

253
libs/pubsub/pubsub_test.go Normal file
View File

@@ -0,0 +1,253 @@
package pubsub_test
import (
"context"
"fmt"
"runtime/debug"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
const (
clientID = "test-client"
)
func TestSubscribe(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch := make(chan interface{}, 1)
err := s.Subscribe(ctx, clientID, query.Empty{}, ch)
require.NoError(t, err)
err = s.Publish(ctx, "Ka-Zar")
require.NoError(t, err)
assertReceive(t, "Ka-Zar", ch)
err = s.Publish(ctx, "Quicksilver")
require.NoError(t, err)
assertReceive(t, "Quicksilver", ch)
}
func TestDifferentClients(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch1 := make(chan interface{}, 1)
err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
require.NoError(t, err)
assertReceive(t, "Iceman", ch1)
ch2 := make(chan interface{}, 1)
err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}))
require.NoError(t, err)
assertReceive(t, "Ultimo", ch1)
assertReceive(t, "Ultimo", ch2)
ch3 := make(chan interface{}, 1)
err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewRoundStep"}))
require.NoError(t, err)
assert.Zero(t, len(ch3))
}
func TestClientSubscribesTwice(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
q := query.MustParse("tm.events.type='NewBlock'")
ch1 := make(chan interface{}, 1)
err := s.Subscribe(ctx, clientID, q, ch1)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
require.NoError(t, err)
assertReceive(t, "Goblin Queen", ch1)
ch2 := make(chan interface{}, 1)
err = s.Subscribe(ctx, clientID, q, ch2)
require.Error(t, err)
err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
require.NoError(t, err)
assertReceive(t, "Spider-Man", ch1)
}
func TestUnsubscribe(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch := make(chan interface{})
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch)
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
require.NoError(t, err)
err = s.Publish(ctx, "Nick Fury")
require.NoError(t, err)
assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe")
_, ok := <-ch
assert.False(t, ok)
}
func TestResubscribe(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch := make(chan interface{})
err := s.Subscribe(ctx, clientID, query.Empty{}, ch)
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.Empty{})
require.NoError(t, err)
ch = make(chan interface{})
err = s.Subscribe(ctx, clientID, query.Empty{}, ch)
require.NoError(t, err)
err = s.Publish(ctx, "Cable")
require.NoError(t, err)
assertReceive(t, "Cable", ch)
}
func TestUnsubscribeAll(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()
ctx := context.Background()
ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1)
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1)
require.NoError(t, err)
err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2)
require.NoError(t, err)
err = s.UnsubscribeAll(ctx, clientID)
require.NoError(t, err)
err = s.Publish(ctx, "Nick Fury")
require.NoError(t, err)
assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll")
assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll")
_, ok := <-ch1
assert.False(t, ok)
_, ok = <-ch2
assert.False(t, ok)
}
func TestBufferCapacity(t *testing.T) {
s := pubsub.NewServer(pubsub.BufferCapacity(2))
s.SetLogger(log.TestingLogger())
assert.Equal(t, 2, s.BufferCapacity())
ctx := context.Background()
err := s.Publish(ctx, "Nighthawk")
require.NoError(t, err)
err = s.Publish(ctx, "Sage")
require.NoError(t, err)
ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond)
defer cancel()
err = s.Publish(ctx, "Ironclad")
if assert.Error(t, err) {
assert.Equal(t, context.DeadlineExceeded, err)
}
}
func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) }
func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) }
func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) }
func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) }
func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) }
func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) }
func benchmarkNClients(n int, b *testing.B) {
s := pubsub.NewServer()
s.Start()
defer s.Stop()
ctx := context.Background()
for i := 0; i < n; i++ {
ch := make(chan interface{})
go func() {
for range ch {
}
}()
s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)}))
}
}
func benchmarkNClientsOneQuery(n int, b *testing.B) {
s := pubsub.NewServer()
s.Start()
defer s.Stop()
ctx := context.Background()
q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1")
for i := 0; i < n; i++ {
ch := make(chan interface{})
go func() {
for range ch {
}
}()
s.Subscribe(ctx, clientID, q, ch)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"}))
}
}
///////////////////////////////////////////////////////////////////////////////
/// HELPERS
///////////////////////////////////////////////////////////////////////////////
func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) {
select {
case actual := <-ch:
if actual != nil {
assert.Equal(t, expected, actual, msgAndArgs...)
}
case <-time.After(1 * time.Second):
t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected)
debug.PrintStack()
}
}

View File

@@ -0,0 +1,11 @@
gen_query_parser:
@go get github.com/pointlander/peg
peg -inline -switch query.peg
fuzzy_test:
@go get github.com/dvyukov/go-fuzz/go-fuzz
@go get github.com/dvyukov/go-fuzz/go-fuzz-build
go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test
go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output
.PHONY: gen_query_parser fuzzy_test

View File

@@ -0,0 +1,16 @@
package query
import "github.com/tendermint/tendermint/libs/pubsub"
// Empty query matches any set of tags.
type Empty struct {
}
// Matches always returns true.
func (Empty) Matches(tags pubsub.TagMap) bool {
return true
}
func (Empty) String() string {
return "empty"
}

View File

@@ -0,0 +1,18 @@
package query_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
func TestEmptyQueryMatchesAnything(t *testing.T) {
q := query.Empty{}
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{})))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Asher": "Roth"})))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66"})))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66", "Billy": "Blue"})))
}

View File

@@ -0,0 +1,30 @@
package fuzz_test
import (
"fmt"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
func Fuzz(data []byte) int {
sdata := string(data)
q0, err := query.New(sdata)
if err != nil {
return 0
}
sdata1 := q0.String()
q1, err := query.New(sdata1)
if err != nil {
panic(err)
}
sdata2 := q1.String()
if sdata1 != sdata2 {
fmt.Printf("q0: %q\n", sdata1)
fmt.Printf("q1: %q\n", sdata2)
panic("query changed")
}
return 1
}

View File

@@ -0,0 +1,92 @@
package query_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
// TODO: fuzzy testing?
func TestParser(t *testing.T) {
cases := []struct {
query string
valid bool
}{
{"tm.events.type='NewBlock'", true},
{"tm.events.type = 'NewBlock'", true},
{"tm.events.name = ''", true},
{"tm.events.type='TIME'", true},
{"tm.events.type='DATE'", true},
{"tm.events.type='='", true},
{"tm.events.type='TIME", false},
{"tm.events.type=TIME'", false},
{"tm.events.type==", false},
{"tm.events.type=NewBlock", false},
{">==", false},
{"tm.events.type 'NewBlock' =", false},
{"tm.events.type>'NewBlock'", false},
{"", false},
{"=", false},
{"='NewBlock'", false},
{"tm.events.type=", false},
{"tm.events.typeNewBlock", false},
{"tm.events.type'NewBlock'", false},
{"'NewBlock'", false},
{"NewBlock", false},
{"", false},
{"tm.events.type='NewBlock' AND abci.account.name='Igor'", true},
{"tm.events.type='NewBlock' AND", false},
{"tm.events.type='NewBlock' AN", false},
{"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false},
{"AND tm.events.type='NewBlock' ", false},
{"abci.account.name CONTAINS 'Igor'", true},
{"tx.date > DATE 2013-05-03", true},
{"tx.date < DATE 2013-05-03", true},
{"tx.date <= DATE 2013-05-03", true},
{"tx.date >= DATE 2013-05-03", true},
{"tx.date >= DAT 2013-05-03", false},
{"tx.date <= DATE2013-05-03", false},
{"tx.date <= DATE -05-03", false},
{"tx.date >= DATE 20130503", false},
{"tx.date >= DATE 2013+01-03", false},
// incorrect year, month, day
{"tx.date >= DATE 0013-01-03", false},
{"tx.date >= DATE 2013-31-03", false},
{"tx.date >= DATE 2013-01-83", false},
{"tx.date > TIME 2013-05-03T14:45:00+07:00", true},
{"tx.date < TIME 2013-05-03T14:45:00-02:00", true},
{"tx.date <= TIME 2013-05-03T14:45:00Z", true},
{"tx.date >= TIME 2013-05-03T14:45:00Z", true},
{"tx.date >= TIME2013-05-03T14:45:00Z", false},
{"tx.date = IME 2013-05-03T14:45:00Z", false},
{"tx.date = TIME 2013-05-:45:00Z", false},
{"tx.date >= TIME 2013-05-03T14:45:00", false},
{"tx.date >= TIME 0013-00-00T14:45:00Z", false},
{"tx.date >= TIME 2013+05=03T14:45:00Z", false},
{"account.balance=100", true},
{"account.balance >= 200", true},
{"account.balance >= -300", false},
{"account.balance >>= 400", false},
{"account.balance=33.22.1", false},
{"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true},
{"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false},
}
for _, c := range cases {
_, err := query.New(c.query)
if c.valid {
assert.NoErrorf(t, err, "Query was '%s'", c.query)
} else {
assert.Errorf(t, err, "Query was '%s'", c.query)
}
}
}

339
libs/pubsub/query/query.go Normal file
View File

@@ -0,0 +1,339 @@
// Package query provides a parser for a custom query format:
//
// abci.invoice.number=22 AND abci.invoice.owner=Ivan
//
// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar.
// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics
//
// It has a support for numbers (integer and floating point), dates and times.
package query
import (
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/tendermint/tendermint/libs/pubsub"
)
// Query holds the query string and the query parser.
type Query struct {
str string
parser *QueryParser
}
// Condition represents a single condition within a query and consists of tag
// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7").
type Condition struct {
Tag string
Op Operator
Operand interface{}
}
// New parses the given string and returns a query or error if the string is
// invalid.
func New(s string) (*Query, error) {
p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)}
p.Init()
if err := p.Parse(); err != nil {
return nil, err
}
return &Query{str: s, parser: p}, nil
}
// MustParse turns the given string into a query or panics; for tests or others
// cases where you know the string is valid.
func MustParse(s string) *Query {
q, err := New(s)
if err != nil {
panic(fmt.Sprintf("failed to parse %s: %v", s, err))
}
return q
}
// String returns the original string.
func (q *Query) String() string {
return q.str
}
// Operator is an operator that defines some kind of relation between tag and
// operand (equality, etc.).
type Operator uint8
const (
// "<="
OpLessEqual Operator = iota
// ">="
OpGreaterEqual
// "<"
OpLess
// ">"
OpGreater
// "="
OpEqual
// "CONTAINS"; used to check if a string contains a certain sub string.
OpContains
)
const (
// DateLayout defines a layout for all dates (`DATE date`)
DateLayout = "2006-01-02"
// TimeLayout defines a layout for all times (`TIME time`)
TimeLayout = time.RFC3339
)
// Conditions returns a list of conditions.
func (q *Query) Conditions() []Condition {
conditions := make([]Condition, 0)
buffer, begin, end := q.parser.Buffer, 0, 0
var tag string
var op Operator
// tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7")
for _, token := range q.parser.Tokens() {
switch token.pegRule {
case rulePegText:
begin, end = int(token.begin), int(token.end)
case ruletag:
tag = buffer[begin:end]
case rulele:
op = OpLessEqual
case rulege:
op = OpGreaterEqual
case rulel:
op = OpLess
case ruleg:
op = OpGreater
case ruleequal:
op = OpEqual
case rulecontains:
op = OpContains
case rulevalue:
// strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock")
valueWithoutSingleQuotes := buffer[begin+1 : end-1]
conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes})
case rulenumber:
number := buffer[begin:end]
if strings.ContainsAny(number, ".") { // if it looks like a floating-point number
value, err := strconv.ParseFloat(number, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
}
conditions = append(conditions, Condition{tag, op, value})
} else {
value, err := strconv.ParseInt(number, 10, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number))
}
conditions = append(conditions, Condition{tag, op, value})
}
case ruletime:
value, err := time.Parse(TimeLayout, buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
}
conditions = append(conditions, Condition{tag, op, value})
case ruledate:
value, err := time.Parse("2006-01-02", buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end]))
}
conditions = append(conditions, Condition{tag, op, value})
}
}
return conditions
}
// Matches returns true if the query matches the given set of tags, false otherwise.
//
// For example, query "name=John" matches tags = {"name": "John"}. More
// examples could be found in parser_test.go and query_test.go.
func (q *Query) Matches(tags pubsub.TagMap) bool {
if tags.Len() == 0 {
return false
}
buffer, begin, end := q.parser.Buffer, 0, 0
var tag string
var op Operator
// tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7")
for _, token := range q.parser.Tokens() {
switch token.pegRule {
case rulePegText:
begin, end = int(token.begin), int(token.end)
case ruletag:
tag = buffer[begin:end]
case rulele:
op = OpLessEqual
case rulege:
op = OpGreaterEqual
case rulel:
op = OpLess
case ruleg:
op = OpGreater
case ruleequal:
op = OpEqual
case rulecontains:
op = OpContains
case rulevalue:
// strip single quotes from value (i.e. "'NewBlock'" -> "NewBlock")
valueWithoutSingleQuotes := buffer[begin+1 : end-1]
// see if the triplet (tag, operator, operand) matches any tag
// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" }
if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) {
return false
}
case rulenumber:
number := buffer[begin:end]
if strings.ContainsAny(number, ".") { // if it looks like a floating-point number
value, err := strconv.ParseFloat(number, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
}
if !match(tag, op, reflect.ValueOf(value), tags) {
return false
}
} else {
value, err := strconv.ParseInt(number, 10, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number))
}
if !match(tag, op, reflect.ValueOf(value), tags) {
return false
}
}
case ruletime:
value, err := time.Parse(TimeLayout, buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
}
if !match(tag, op, reflect.ValueOf(value), tags) {
return false
}
case ruledate:
value, err := time.Parse("2006-01-02", buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end]))
}
if !match(tag, op, reflect.ValueOf(value), tags) {
return false
}
}
}
return true
}
// match returns true if the given triplet (tag, operator, operand) matches any tag.
//
// First, it looks up the tag in tags and if it finds one, tries to compare the
// value from it to the operand using the operator.
//
// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" }
func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool {
// look up the tag from the query in tags
value, ok := tags.Get(tag)
if !ok {
return false
}
switch operand.Kind() {
case reflect.Struct: // time
operandAsTime := operand.Interface().(time.Time)
// try our best to convert value from tags to time.Time
var (
v time.Time
err error
)
if strings.ContainsAny(value, "T") {
v, err = time.Parse(TimeLayout, value)
} else {
v, err = time.Parse(DateLayout, value)
}
if err != nil {
panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err))
}
switch op {
case OpLessEqual:
return v.Before(operandAsTime) || v.Equal(operandAsTime)
case OpGreaterEqual:
return v.Equal(operandAsTime) || v.After(operandAsTime)
case OpLess:
return v.Before(operandAsTime)
case OpGreater:
return v.After(operandAsTime)
case OpEqual:
return v.Equal(operandAsTime)
}
case reflect.Float64:
operandFloat64 := operand.Interface().(float64)
var v float64
// try our best to convert value from tags to float64
v, err := strconv.ParseFloat(value, 64)
if err != nil {
panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err))
}
switch op {
case OpLessEqual:
return v <= operandFloat64
case OpGreaterEqual:
return v >= operandFloat64
case OpLess:
return v < operandFloat64
case OpGreater:
return v > operandFloat64
case OpEqual:
return v == operandFloat64
}
case reflect.Int64:
operandInt := operand.Interface().(int64)
var v int64
// if value looks like float, we try to parse it as float
if strings.ContainsAny(value, ".") {
v1, err := strconv.ParseFloat(value, 64)
if err != nil {
panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err))
}
v = int64(v1)
} else {
var err error
// try our best to convert value from tags to int64
v, err = strconv.ParseInt(value, 10, 64)
if err != nil {
panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err))
}
}
switch op {
case OpLessEqual:
return v <= operandInt
case OpGreaterEqual:
return v >= operandInt
case OpLess:
return v < operandInt
case OpGreater:
return v > operandInt
case OpEqual:
return v == operandInt
}
case reflect.String:
switch op {
case OpEqual:
return value == operand.String()
case OpContains:
return strings.Contains(value, operand.String())
}
default:
panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind()))
}
return false
}

View File

@@ -0,0 +1,33 @@
package query
type QueryParser Peg {
}
e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !.
condition <- tag ' '* (le ' '* (number / time / date)
/ ge ' '* (number / time / date)
/ l ' '* (number / time / date)
/ g ' '* (number / time / date)
/ equal ' '* (number / time / date / value)
/ contains ' '* value
)
tag <- < (![ \t\n\r\\()"'=><] .)+ >
value <- < '\'' (!["'] .)* '\''>
number <- < ('0'
/ [1-9] digit* ('.' digit*)?) >
digit <- [0-9]
time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') >
date <- "DATE " < year '-' month '-' day >
year <- ('1' / '2') digit digit digit
month <- ('0' / '1') digit
day <- ('0' / '1' / '2' / '3') digit
and <- "AND"
equal <- "="
contains <- "CONTAINS"
le <- "<="
ge <- ">="
l <- "<"
g <- ">"

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,87 @@
package query_test
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
func TestMatches(t *testing.T) {
var (
txDate = "2017-01-01"
txTime = "2018-05-03T14:45:00Z"
)
testCases := []struct {
s string
tags map[string]string
err bool
matches bool
}{
{"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true},
{"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true},
{"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true},
{"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true},
{"account.balance < 1000.0", map[string]string{"account.balance": "900"}, false, true},
{"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true},
{"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true},
{"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true},
{"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false},
{"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true},
{"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true},
{"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false},
{"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true},
{"tx.time = TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": txTime}, false, false},
{"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true},
{"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false},
}
for _, tc := range testCases {
q, err := query.New(tc.s)
if !tc.err {
require.Nil(t, err)
}
if tc.matches {
assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags)
} else {
assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags)
}
}
}
func TestMustParse(t *testing.T) {
assert.Panics(t, func() { query.MustParse("=") })
assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") })
}
func TestConditions(t *testing.T) {
txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z")
require.NoError(t, err)
testCases := []struct {
s string
conditions []query.Condition
}{
{s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}},
{s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}},
{s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}},
}
for _, tc := range testCases {
q, err := query.New(tc.s)
require.Nil(t, err)
assert.Equal(t, tc.conditions, q.Conditions())
}
}

View File

@@ -4,8 +4,8 @@ The p2p package provides an abstraction around peer-to-peer communication.
Docs:
- [Connection](../docs/specification/new-spec/p2p/connection.md) for details on how connections and multiplexing work
- [Peer](../docs/specification/new-spec/p2p/peer.md) for details on peer ID, handshakes, and peer exchange
- [Node](../docs/specification/new-spec/p2p/node.md) for details about different types of nodes and how they should work
- [Pex](../docs/specification/new-spec/p2p/pex.md) for details on peer discovery and exchange
- [Config](../docs/specification/new-spec/p2p/config.md) for details on some config option
- [Connection](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/connection.md) for details on how connections and multiplexing work
- [Peer](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/peer.md) for details on peer ID, handshakes, and peer exchange
- [Node](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/node.md) for details about different types of nodes and how they should work
- [Pex](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/reactors/pex/pex.md) for details on peer discovery and exchange
- [Config](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/config.md) for details on some config option

View File

@@ -1,6 +1,8 @@
package dummy
import (
"net"
p2p "github.com/tendermint/tendermint/p2p"
tmconn "github.com/tendermint/tendermint/p2p/conn"
cmn "github.com/tendermint/tmlibs/common"
@@ -19,6 +21,7 @@ func NewPeer() *peer {
kv: make(map[string]interface{}),
}
p.BaseService = *cmn.NewBaseService(nil, "peer", p)
return p
}
@@ -42,6 +45,11 @@ func (p *peer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{}
}
// RemoteIP always returns localhost.
func (p *peer) RemoteIP() net.IP {
return net.ParseIP("127.0.0.1")
}
// Status always returns empry connection status.
func (p *peer) Status() tmconn.ConnectionStatus {
return tmconn.ConnectionStatus{}

View File

@@ -1,14 +1,38 @@
package p2p
import (
"errors"
"fmt"
"net"
)
var (
ErrSwitchDuplicatePeer = errors.New("Duplicate peer")
ErrSwitchConnectToSelf = errors.New("Connect to self")
)
// ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known
// ID.
type ErrSwitchDuplicatePeerID struct {
ID ID
}
func (e ErrSwitchDuplicatePeerID) Error() string {
return fmt.Sprintf("Duplicate peer ID %v", e.ID)
}
// ErrSwitchDuplicatePeerIP to be raised whena a peer is connecting with a known
// IP.
type ErrSwitchDuplicatePeerIP struct {
IP net.IP
}
func (e ErrSwitchDuplicatePeerIP) Error() string {
return fmt.Sprintf("Duplicate peer IP %v", e.IP.String())
}
// ErrSwitchConnectToSelf to be raised when trying to connect to itself.
type ErrSwitchConnectToSelf struct {
Addr *NetAddress
}
func (e ErrSwitchConnectToSelf) Error() string {
return fmt.Sprintf("Connect to self: %v", e.Addr)
}
type ErrSwitchAuthenticationFailure struct {
Dialed *NetAddress
@@ -16,7 +40,11 @@ type ErrSwitchAuthenticationFailure struct {
}
func (e ErrSwitchAuthenticationFailure) Error() string {
return fmt.Sprintf("Failed to authenticate peer. Dialed %v, but got peer with ID %s", e.Dialed, e.Got)
return fmt.Sprintf(
"Failed to authenticate peer. Dialed %v, but got peer with ID %s",
e.Dialed,
e.Got,
)
}
//-------------------------------------------------------------------

View File

@@ -3,20 +3,24 @@ package p2p
import (
"fmt"
"net"
"sync/atomic"
"time"
"github.com/tendermint/go-crypto"
crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
tmconn "github.com/tendermint/tendermint/p2p/conn"
)
var testIPSuffix uint32 = 0
// Peer is an interface representing a peer connected on a reactor.
type Peer interface {
cmn.Service
ID() ID // peer's cryptographic ID
RemoteIP() net.IP // remote IP of the connection
IsOutbound() bool // did we dial the peer
IsPersistent() bool // do we redial this peer when we disconnect
NodeInfo() NodeInfo // peer's info
@@ -37,6 +41,7 @@ type peerConn struct {
persistent bool
config *PeerConfig
conn net.Conn // source connection
ip net.IP
}
// ID only exists for SecretConnection.
@@ -45,6 +50,35 @@ func (pc peerConn) ID() ID {
return PubKeyToID(pc.conn.(*tmconn.SecretConnection).RemotePubKey())
}
// Return the IP from the connection RemoteAddr
func (pc peerConn) RemoteIP() net.IP {
if pc.ip != nil {
return pc.ip
}
// In test cases a conn could not be present at all or be an in-memory
// implementation where we want to return a fake ip.
if pc.conn == nil || pc.conn.RemoteAddr().String() == "pipe" {
pc.ip = net.IP{172, 16, 0, byte(atomic.AddUint32(&testIPSuffix, 1))}
return pc.ip
}
host, _, err := net.SplitHostPort(pc.conn.RemoteAddr().String())
if err != nil {
panic(err)
}
ips, err := net.LookupIP(host)
if err != nil {
panic(err)
}
pc.ip = ips[0]
return pc.ip
}
// peer implements Peer.
//
// Before using a peer, you will need to perform a handshake on connection.

View File

@@ -1,12 +1,14 @@
package p2p
import (
"net"
"sync"
)
// IPeerSet has a (immutable) subset of the methods of PeerSet.
type IPeerSet interface {
Has(key ID) bool
HasIP(ip net.IP) bool
Get(key ID) Peer
List() []Peer
Size() int
@@ -36,12 +38,17 @@ func NewPeerSet() *PeerSet {
}
// Add adds the peer to the PeerSet.
// It returns ErrSwitchDuplicatePeer if the peer is already present.
// It returns an error carrying the reason, if the peer is already present.
func (ps *PeerSet) Add(peer Peer) error {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.lookup[peer.ID()] != nil {
return ErrSwitchDuplicatePeer
return ErrSwitchDuplicatePeerID{peer.ID()}
}
if ps.hasIP(peer.RemoteIP()) {
return ErrSwitchDuplicatePeerIP{peer.RemoteIP()}
}
index := len(ps.list)
@@ -61,6 +68,27 @@ func (ps *PeerSet) Has(peerKey ID) bool {
return ok
}
// HasIP returns true if the PeerSet contains the peer referred to by this IP
// address.
func (ps *PeerSet) HasIP(peerIP net.IP) bool {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.hasIP(peerIP)
}
// hasIP does not acquire a lock so it can be used in public methods which
// already lock.
func (ps *PeerSet) hasIP(peerIP net.IP) bool {
for _, item := range ps.lookup {
if item.peer.RemoteIP().Equal(peerIP) {
return true
}
}
return false
}
// Get looks up a peer by the provided peerKey.
func (ps *PeerSet) Get(peerKey ID) Peer {
ps.mtx.Lock()
@@ -76,6 +104,7 @@ func (ps *PeerSet) Get(peerKey ID) Peer {
func (ps *PeerSet) Remove(peer Peer) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item := ps.lookup[peer.ID()]
if item == nil {
return

View File

@@ -2,6 +2,7 @@ package p2p
import (
"math/rand"
"net"
"sync"
"testing"
@@ -12,23 +13,32 @@ import (
)
// Returns an empty kvstore peer
func randPeer() *peer {
func randPeer(ip net.IP) *peer {
if ip == nil {
ip = net.IP{127, 0, 0, 1}
}
nodeKey := NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
return &peer{
p := &peer{
nodeInfo: NodeInfo{
ID: nodeKey.ID(),
ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
},
}
p.ip = ip
return p
}
func TestPeerSetAddRemoveOne(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
var peerList []Peer
for i := 0; i < 5; i++ {
p := randPeer()
p := randPeer(net.IP{127, 0, 0, byte(i)})
if err := peerSet.Add(p); err != nil {
t.Error(err)
}
@@ -72,7 +82,7 @@ func TestPeerSetAddRemoveMany(t *testing.T) {
peers := []Peer{}
N := 100
for i := 0; i < N; i++ {
peer := randPeer()
peer := randPeer(net.IP{127, 0, 0, byte(i)})
if err := peerSet.Add(peer); err != nil {
t.Errorf("Failed to add new peer")
}
@@ -96,7 +106,7 @@ func TestPeerSetAddRemoveMany(t *testing.T) {
func TestPeerSetAddDuplicate(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peer := randPeer()
peer := randPeer(nil)
n := 20
errsChan := make(chan error)
@@ -112,25 +122,49 @@ func TestPeerSetAddDuplicate(t *testing.T) {
}
// Now collect and tally the results
errsTally := make(map[error]int)
errsTally := make(map[string]int)
for i := 0; i < n; i++ {
err := <-errsChan
errsTally[err]++
switch err.(type) {
case ErrSwitchDuplicatePeerID:
errsTally["duplicateID"]++
default:
errsTally["other"]++
}
}
// Our next procedure is to ensure that only one addition
// succeeded and that the rest are each ErrSwitchDuplicatePeer.
wantErrCount, gotErrCount := n-1, errsTally[ErrSwitchDuplicatePeer]
wantErrCount, gotErrCount := n-1, errsTally["duplicateID"]
assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")
wantNilErrCount, gotNilErrCount := 1, errsTally[nil]
wantNilErrCount, gotNilErrCount := 1, errsTally["other"]
assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
}
func TestPeerSetAddDuplicateIP(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
if err := peerSet.Add(randPeer(net.IP{172, 0, 0, 1})); err != nil {
t.Fatal(err)
}
// Add peer with same IP.
err := peerSet.Add(randPeer(net.IP{172, 0, 0, 1}))
assert.Equal(t, ErrSwitchDuplicatePeerIP{IP: net.IP{172, 0, 0, 1}}, err)
}
func TestPeerSetGet(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peer := randPeer()
var (
peerSet = NewPeerSet()
peer = randPeer(nil)
)
assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add")
if err := peerSet.Add(peer); err != nil {
@@ -144,8 +178,8 @@ func TestPeerSetGet(t *testing.T) {
wg.Add(1)
go func(i int) {
defer wg.Done()
got, want := peerSet.Get(peer.ID()), peer
assert.Equal(t, got, want, "#%d: got=%v want=%v", i, got, want)
have, want := peerSet.Get(peer.ID()), peer
assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want)
}(i)
}
wg.Wait()

View File

@@ -11,6 +11,7 @@ import (
crypto "github.com/tendermint/go-crypto"
tmconn "github.com/tendermint/tendermint/p2p/conn"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
@@ -111,35 +112,44 @@ func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig)
}
type remotePeer struct {
PrivKey crypto.PrivKey
Config *PeerConfig
addr *NetAddress
quit chan struct{}
PrivKey crypto.PrivKey
Config *PeerConfig
addr *NetAddress
quit chan struct{}
channels cmn.HexBytes
listenAddr string
}
func (p *remotePeer) Addr() *NetAddress {
return p.addr
func (rp *remotePeer) Addr() *NetAddress {
return rp.addr
}
func (p *remotePeer) ID() ID {
return PubKeyToID(p.PrivKey.PubKey())
func (rp *remotePeer) ID() ID {
return PubKeyToID(rp.PrivKey.PubKey())
}
func (p *remotePeer) Start() {
l, e := net.Listen("tcp", "127.0.0.1:0") // any available address
func (rp *remotePeer) Start() {
if rp.listenAddr == "" {
rp.listenAddr = "127.0.0.1:0"
}
l, e := net.Listen("tcp", rp.listenAddr) // any available address
if e != nil {
golog.Fatalf("net.Listen tcp :0: %+v", e)
}
p.addr = NewNetAddress(PubKeyToID(p.PrivKey.PubKey()), l.Addr())
p.quit = make(chan struct{})
go p.accept(l)
rp.addr = NewNetAddress(PubKeyToID(rp.PrivKey.PubKey()), l.Addr())
rp.quit = make(chan struct{})
if rp.channels == nil {
rp.channels = []byte{testCh}
}
go rp.accept(l)
}
func (p *remotePeer) Stop() {
close(p.quit)
func (rp *remotePeer) Stop() {
close(rp.quit)
}
func (p *remotePeer) accept(l net.Listener) {
func (rp *remotePeer) accept(l net.Listener) {
conns := []net.Conn{}
for {
@@ -147,17 +157,19 @@ func (p *remotePeer) accept(l net.Listener) {
if err != nil {
golog.Fatalf("Failed to accept conn: %+v", err)
}
pc, err := newInboundPeerConn(conn, p.Config, p.PrivKey)
pc, err := newInboundPeerConn(conn, rp.Config, rp.PrivKey)
if err != nil {
golog.Fatalf("Failed to create a peer: %+v", err)
}
_, err = pc.HandshakeTimeout(NodeInfo{
ID: p.Addr().ID,
ID: rp.Addr().ID,
Moniker: "remote_peer",
Network: "testing",
Version: "123.123.123",
ListenAddr: l.Addr().String(),
Channels: []byte{testCh},
Channels: rp.channels,
}, 1*time.Second)
if err != nil {
golog.Fatalf("Failed to perform handshake: %+v", err)
@@ -166,7 +178,7 @@ func (p *remotePeer) accept(l net.Listener) {
conns = append(conns, conn)
select {
case <-p.quit:
case <-rp.quit:
for _, conn := range conns {
if err := conn.Close(); err != nil {
golog.Fatal(err)

View File

@@ -7,7 +7,7 @@ import (
"sync"
"time"
"github.com/tendermint/go-amino"
amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/p2p"
@@ -281,6 +281,7 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
// RequestAddrs asks peer for more addresses if we do not already
// have a request out for this peer.
func (r *PEXReactor) RequestAddrs(p Peer) {
r.Logger.Debug("Request addrs", "from", p)
id := string(p.ID())
if r.requestsSent.Has(id) {
return

View File

@@ -3,6 +3,7 @@ package pex
import (
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"testing"
@@ -58,59 +59,69 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
assert.Equal(t, size+1, book.Size())
}
func TestPEXReactorRunning(t *testing.T) {
N := 3
switches := make([]*p2p.Switch, N)
// --- FAIL: TestPEXReactorRunning (11.10s)
// pex_reactor_test.go:411: expected all switches to be connected to at
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
//
// EXPLANATION: peers are getting rejected because in switch#addPeer we check
// if any peer (who we already connected to) has the same IP. Even though local
// peers have different IP addresses, they all have the same underlying remote
// IP: 127.0.0.1.
//
// func TestPEXReactorRunning(t *testing.T) {
// N := 3
// switches := make([]*p2p.Switch, N)
// directory to store address books
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(t, err)
defer os.RemoveAll(dir) // nolint: errcheck
// // directory to store address books
// dir, err := ioutil.TempDir("", "pex_reactor")
// require.Nil(t, err)
// defer os.RemoveAll(dir) // nolint: errcheck
books := make([]*addrBook, N)
logger := log.TestingLogger()
// books := make([]*addrBook, N)
// logger := log.TestingLogger()
// create switches
for i := 0; i < N; i++ {
switches[i] = p2p.MakeSwitch(config, i, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
books[i].SetLogger(logger.With("pex", i))
sw.SetAddrBook(books[i])
// // create switches
// for i := 0; i < N; i++ {
// switches[i] = p2p.MakeSwitch(config, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
// books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
// books[i].SetLogger(logger.With("pex", i))
// sw.SetAddrBook(books[i])
sw.SetLogger(logger.With("pex", i))
// sw.SetLogger(logger.With("pex", i))
r := NewPEXReactor(books[i], &PEXReactorConfig{})
r.SetLogger(logger.With("pex", i))
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
// r := NewPEXReactor(books[i], &PEXReactorConfig{})
// r.SetLogger(logger.With("pex", i))
// r.SetEnsurePeersPeriod(250 * time.Millisecond)
// sw.AddReactor("pex", r)
return sw
})
}
// return sw
// })
// }
addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) {
addr := switches[otherSwitchIndex].NodeInfo().NetAddress()
books[switchIndex].AddAddress(addr, addr)
}
// addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) {
// addr := switches[otherSwitchIndex].NodeInfo().NetAddress()
// books[switchIndex].AddAddress(addr, addr)
// }
addOtherNodeAddrToAddrBook(0, 1)
addOtherNodeAddrToAddrBook(1, 0)
addOtherNodeAddrToAddrBook(2, 1)
// addOtherNodeAddrToAddrBook(0, 1)
// addOtherNodeAddrToAddrBook(1, 0)
// addOtherNodeAddrToAddrBook(2, 1)
for i, sw := range switches {
sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i)))
// for i, sw := range switches {
// sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i)))
err := sw.Start() // start switch and reactors
require.Nil(t, err)
}
// err := sw.Start() // start switch and reactors
// require.Nil(t, err)
// }
assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1)
// assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1)
// stop them
for _, s := range switches {
s.Stop()
}
}
// // stop them
// for _, s := range switches {
// s.Stop()
// }
// }
func TestPEXReactorReceive(t *testing.T) {
r, book := createReactor(&PEXReactorConfig{})
@@ -365,6 +376,7 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo {
ListenAddr: mp.addr.DialString(),
}
}
func (mp mockPeer) RemoteIP() net.IP { return net.ParseIP("127.0.0.1") }
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) Send(byte, []byte) bool { return false }
func (mp mockPeer) TrySend(byte, []byte) bool { return false }

View File

@@ -403,8 +403,8 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
sw.randomSleep(0)
err := sw.DialPeerWithAddress(addr, persistent)
if err != nil {
switch err {
case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeer:
switch err.(type) {
case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID:
sw.Logger.Debug("Error dialing peer", "err", err)
default:
sw.Logger.Error("Error dialing peer", "err", err)
@@ -564,20 +564,22 @@ func (sw *Switch) addPeer(pc peerConn) error {
// Avoid self
if sw.nodeKey.ID() == peerID {
addr := peerNodeInfo.NetAddress()
// remove the given address from the address book if we added it earlier
// remove the given address from the address book
// and add to our addresses to avoid dialing again
sw.addrBook.RemoveAddress(addr)
// add the given address to the address book to avoid dialing ourselves
// again this is our public address
sw.addrBook.AddOurAddress(addr)
return ErrSwitchConnectToSelf
return ErrSwitchConnectToSelf{addr}
}
// Avoid duplicate
if sw.peers.Has(peerID) {
return ErrSwitchDuplicatePeer
return ErrSwitchDuplicatePeerID{peerID}
}
// Check for duplicate connection or peer info IP.
if sw.peers.HasIP(pc.RemoteIP()) ||
sw.peers.HasIP(peerNodeInfo.NetAddress().IP) {
return ErrSwitchDuplicatePeerIP{pc.RemoteIP()}
}
// Filter peer against ID white list

View File

@@ -193,7 +193,7 @@ func TestSwitchFiltersOutItself(t *testing.T) {
// addr should be rejected in addPeer based on the same ID
err := s1.DialPeerWithAddress(rp.Addr(), false)
if assert.Error(t, err) {
assert.Equal(t, ErrSwitchConnectToSelf, err)
assert.Equal(t, ErrSwitchConnectToSelf{rp.Addr()}.Error(), err.Error())
}
assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
@@ -317,7 +317,13 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
assert.False(peer.IsRunning())
// simulate another remote peer
rp = &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp = &remotePeer{
PrivKey: crypto.GenPrivKeyEd25519(),
Config: DefaultPeerConfig(),
// Use different interface to prevent duplicate IP filter, this will break
// beyond two peers.
listenAddr: "127.0.0.2:0",
}
rp.Start()
defer rp.Stop()

View File

@@ -1,7 +1,9 @@
package p2p
import (
"fmt"
"net"
"sync/atomic"
crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
@@ -80,7 +82,9 @@ func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Swit
func Connect2Switches(switches []*Switch, i, j int) {
switchI := switches[i]
switchJ := switches[j]
c1, c2 := conn.NetPipe()
doneCh := make(chan struct{})
go func() {
err := switchI.addPeerWithConnection(c1)
@@ -128,6 +132,8 @@ func StartSwitches(switches []*Switch) error {
return nil
}
var listenAddrSuffix uint32 = 1
func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
// new switch, add reactors
// TODO: let the config be passed in?
@@ -142,7 +148,7 @@ func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
Moniker: cmn.Fmt("switch%d", i),
Network: network,
Version: version,
ListenAddr: cmn.Fmt("%v:%v", network, cmn.RandIntn(64512)+1023),
ListenAddr: fmt.Sprintf("127.0.0.%d:%d", atomic.AddUint32(&listenAddrSuffix, 1), cmn.RandIntn(64512)+1023),
}
for ch := range sw.reactorsByCh {
ni.Channels = append(ni.Channels, ch)

View File

@@ -11,7 +11,7 @@ import (
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)
/*

View File

@@ -8,7 +8,7 @@ import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)
/*

View File

@@ -3,17 +3,16 @@
## Generate markdown for [Slate](https://github.com/tendermint/slate)
We are using [Slate](https://github.com/tendermint/slate) to power our RPC
documentation. If you are changing a comment, make sure to copy the resulting
changes to the slate repo and make a PR
[there](https://github.com/tendermint/slate) as well. For generating markdown
use:
documentation. For generating markdown use:
```shell
go get github.com/melekes/godoc2md
go get github.com/davecheney/godoc2md
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$'
```
For more information see the [CI script for building the Slate docs](/scripts/slate.sh)
## Pagination
Requests that return multiple items will be paginated to 30 items by default.

View File

@@ -7,7 +7,7 @@ Tendermint supports the following RPC protocols:
* JSONRPC over HTTP
* JSONRPC over websockets
Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib). Documentation and tests for that library could be found at `tendermint/rpc/lib` directory.
Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib) which contains its own set of documentation and tests.
## Configuration

View File

@@ -5,10 +5,10 @@ import (
"github.com/pkg/errors"
tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
tmtypes "github.com/tendermint/tendermint/types"
tmquery "github.com/tendermint/tmlibs/pubsub/query"
)
// Subscribe for events via WebSocket.
@@ -46,10 +46,10 @@ import (
// https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants
//
// For complete query syntax, check out
// https://godoc.org/github.com/tendermint/tmlibs/pubsub/query.
// https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query.
//
// ```go
// import "github.com/tendermint/tmlibs/pubsub/query"
// import "github.com/tendermint/tendermint/libs/pubsub/query"
// import "github.com/tendermint/tendermint/types"
//
// client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket")

View File

@@ -3,11 +3,12 @@ package core
import (
"fmt"
cmn "github.com/tendermint/tmlibs/common"
tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
tmquery "github.com/tendermint/tmlibs/pubsub/query"
)
// Tx allows you to query the transaction results. `nil` could mean the

View File

@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/tendermint/go-amino"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)
//----------------------------------------

View File

@@ -1,13 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; killall tendermint"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint unsafe_reset_priv_validator; rm -rf ~/.tendermint/data"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; git pull origin develop; make"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; mkdir -p ~/.tendermint/logs"
debora run --bg --label tendermint -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint node 2>&1 | stdinwriter -outpath ~/.tendermint/logs/tendermint.log"
printf "\n\nSleeping for a minute\n"
sleep 60
debora download tendermint "logs/async$1"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; killall tendermint"

View File

@@ -1,11 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; killall tendermint; killall logjack"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint unsafe_reset_priv_validator; rm -rf ~/.tendermint/data; rm ~/.tendermint/config/genesis.json; rm ~/.tendermint/logs/*"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; git pull origin develop; make"
debora run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; mkdir -p ~/.tendermint/logs"
debora run --bg --label tendermint -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint node 2>&1 | stdinwriter -outpath ~/.tendermint/logs/tendermint.log"
debora run --bg --label logjack -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; logjack -chopSize='10M' -limitSize='1G' ~/.tendermint/logs/tendermint.log"
printf "Done\n"

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
printf "Starting group $1...\n"
sleep 3
debora --group "$1" run --bg --label tendermint -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; tendermint node 2>&1 | stdinwriter -outpath ~/.tendermint/logs/tendermint.log"
debora --group "$1" run --bg --label logjack -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; logjack -chopSize='10M' -limitSize='1G' ~/.tendermint/logs/tendermint.log"
printf "Done\n"

View File

@@ -1,9 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
printf "Stopping group $1...\n"
sleep 3
debora --group "$1" run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; killall tendermint; killall logjack"
printf "Done\n"

View File

@@ -1,19 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
debora open "[::]:46661"
debora --group default.upgrade status
printf "\n\nShutting down barak default port...\n\n"
sleep 3
debora --group default.upgrade close "[::]:46660"
debora --group default.upgrade run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; git pull origin develop; make"
debora --group default.upgrade run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; mkdir -p ~/.barak/logs"
debora --group default.upgrade run --bg --label barak -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; barak --config=cmd/barak/seed 2>&1 | stdinwriter -outpath ~/.barak/logs/barak.log"
printf "\n\nTesting new barak...\n\n"
sleep 3
debora status
printf "\n\nShutting down old barak...\n\n"
sleep 3
debora --group default.upgrade quit
printf "Done!\n"

View File

@@ -1,9 +0,0 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
printf "Upgrading group $1...\n"
sleep 3
debora --group "$1" run -- bash -c "cd \$GOPATH/src/github.com/tendermint/tendermint; git pull origin develop; make"
printf "Done\n"

77
scripts/slate.sh Normal file
View File

@@ -0,0 +1,77 @@
#!/usr/bin/env bash
set -euo pipefail
if [ "$CIRCLE_BRANCH" == "" ]; then
echo "this script is meant to be run on CircleCI, exiting"
echo 1
fi
# check for changes in the `rpc/core` directory
did_rpc_change=$(git diff --name-status $CIRCLE_BRANCH origin/master | grep rpc/core)
if [ "$did_rpc_change" == "" ]; then
echo "no changes detected in rpc/core, exiting"
exit 0
else
echo "changes detected in rpc/core, continuing"
fi
# only run this script on changes to rpc/core committed to develop
if [ "$CIRCLE_BRANCH" != "master" ]; then
echo "the branch being built isn't master, exiting"
exit 0
else
echo "on master, building the RPC docs"
fi
# godoc2md used to convert the go documentation from
# `rpc/core` into a markdown file consumed by Slate
go get github.com/davecheney/godoc2md
# slate works via forks, and we'll be committing to
# master branch, which will trigger our fork to run
# the `./deploy.sh` and publish via the `gh-pages` branch
slate_repo=github.com/tendermint/slate
slate_path="$GOPATH"/src/"$slate_repo"
if [ ! -d "$slate_path" ]; then
git clone https://"$slate_repo".git $slate_path
fi
# the main file we need to update if rpc/core changed
destination="$slate_path"/source/index.html.md
# we remove it then re-create it with the latest changes
rm $destination
header="---
title: RPC Reference
language_tabs:
- shell
- go
toc_footers:
- <a href='https://tendermint.com/'>Tendermint</a>
- <a href='https://github.com/lord/slate'>Documentation Powered by Slate</a>
search: true
---"
# write header to the main slate file
echo "$header" > "$destination"
# generate a markdown from the godoc comments, using a template
rpc_docs=$(godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$')
# append core RPC docs
echo "$rpc_docs" >> "$destination"
# commit the changes
cd $slate_path
git config --global user.email "github@tendermint.com"
git config --global user.name "tenderbot"
git commit -a -m "Update tendermint RPC docs via CircleCI"
git push -q https://${GITHUB_ACCESS_TOKEN}@github.com/tendermint/slate.git master

View File

@@ -74,7 +74,7 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block
return s, ErrInvalidBlock(err)
}
abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block)
abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, s.Validators)
if err != nil {
return s, ErrProxyAppConn(err)
}
@@ -160,7 +160,7 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) {
// Executes block's transactions on proxyAppConn.
// Returns a list of transaction results and updates to the validator set
func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) {
func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block, vs *types.ValidatorSet) (*ABCIResponses, error) {
var validTxs, invalidTxs = 0, 0
txIndex := 0
@@ -187,10 +187,11 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
proxyAppConn.SetResponseCallback(proxyCb)
// determine which validators did not sign last block
absentVals := make([]int32, 0)
absentVals := make([][]byte, 0)
for valI, vote := range block.LastCommit.Precommits {
if vote == nil {
absentVals = append(absentVals, int32(valI))
_, val := vs.GetByIndex(valI)
absentVals = append(absentVals, val.PubKey.Bytes())
}
}
@@ -198,6 +199,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
byzantineVals := make([]abci.Evidence, len(block.Evidence.Evidence))
for i, ev := range block.Evidence.Evidence {
byzantineVals[i] = abci.Evidence{
Type: []byte(ev.String()),
PubKey: ev.Address(), // XXX
Height: ev.Height(),
}
@@ -359,8 +361,8 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty
// ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state.
// It returns the application root hash (result of abci.Commit).
func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger) ([]byte, error) {
_, err := execBlockOnProxyApp(logger, appConnConsensus, block)
func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger, vs *types.ValidatorSet) ([]byte, error) {
_, err := execBlockOnProxyApp(logger, appConnConsensus, block, vs)
if err != nil {
logger.Error("Error executing block on proxy app", "height", block.Height, "err", err)
return nil, err

View File

@@ -18,7 +18,8 @@ import (
)
var (
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("execution_test"))
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("execution_test_1"))
privKey2 = crypto.GenPrivKeyEd25519FromSecret([]byte("execution_test_2"))
chainID = "execution_chain"
testPartSize = 65536
nTxsPerBlock = 10
@@ -64,18 +65,18 @@ func TestBeginBlockAbsentValidators(t *testing.T) {
testCases := []struct {
desc string
lastCommitPrecommits []*types.Vote
expectedAbsentValidators []int32
expectedAbsentValidators [][]byte
}{
{"none absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, {ValidatorIndex: 1, Timestamp: now}}, []int32{}},
{"one absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, nil}, []int32{1}},
{"multiple absent", []*types.Vote{nil, nil}, []int32{0, 1}},
{"none absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, {ValidatorIndex: 1, Timestamp: now}}, [][]byte{}},
{"one absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, nil}, [][]byte{privKey2.PubKey().Bytes()}},
{"multiple absent", []*types.Vote{nil, nil}, [][]byte{privKey.PubKey().Bytes(), privKey2.PubKey().Bytes()}},
}
for _, tc := range testCases {
lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits}
block, _ := state.MakeBlock(2, makeTxs(2), lastCommit)
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger())
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators)
require.Nil(t, err, tc.desc)
// -> app must receive an index of the absent validator
@@ -109,10 +110,10 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
expectedByzantineValidators []abci.Evidence
}{
{"none byzantine", []types.Evidence{}, []abci.Evidence{}},
{"one byzantine", []types.Evidence{ev1}, []abci.Evidence{{ev1.Address(), ev1.Height()}}},
{"one byzantine", []types.Evidence{ev1}, []abci.Evidence{{[]byte(ev1.String()), ev1.Address(), ev1.Height(), int64(0)}}},
{"multiple byzantine", []types.Evidence{ev1, ev2}, []abci.Evidence{
{ev1.Address(), ev1.Height()},
{ev2.Address(), ev2.Height()}}},
{[]byte(ev1.String()), ev1.Address(), ev1.Height(), int64(0)},
{[]byte(ev2.String()), ev2.Address(), ev2.Height(), int64(0)}}},
}
for _, tc := range testCases {
@@ -120,7 +121,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
block, _ := state.MakeBlock(10, makeTxs(2), lastCommit)
block.Evidence.Evidence = tc.evidence
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger())
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators)
require.Nil(t, err, tc.desc)
// -> app must receive an index of the byzantine validator
@@ -142,7 +143,8 @@ func state() State {
s, _ := MakeGenesisState(&types.GenesisDoc{
ChainID: chainID,
Validators: []types.GenesisValidator{
{privKey.PubKey(), 10000, "test"},
{privKey.PubKey(), 10000, "test1"},
{privKey2.PubKey(), 10000, "test2"},
},
AppHash: nil,
})
@@ -161,7 +163,7 @@ var _ abci.Application = (*testApp)(nil)
type testApp struct {
abci.BaseApplication
AbsentValidators []int32
AbsentValidators [][]byte
ByzantineValidators []abci.Evidence
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-crypto"
crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
@@ -121,7 +121,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
{Code: 383},
{Data: []byte("Gotcha!"),
Tags: []cmn.KVPair{
cmn.KVPair{[]byte("a"), []byte{1}},
cmn.KVPair{[]byte("a"), []byte("1")},
cmn.KVPair{[]byte("build"), []byte("stuff")},
}},
},

View File

@@ -4,7 +4,7 @@ import (
"errors"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/pubsub/query"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
// TxIndexer interface defines methods to index and search transactions.

View File

@@ -3,8 +3,9 @@ package txindex
import (
"context"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/types"
)
const (

View File

@@ -12,8 +12,8 @@ import (
"github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/pubsub/query"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
)

View File

@@ -11,8 +11,8 @@ import (
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/tmlibs/common"
db "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/pubsub/query"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
)

View File

@@ -5,7 +5,7 @@ import (
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/pubsub/query"
"github.com/tendermint/tendermint/libs/pubsub/query"
)
var _ txindex.TxIndexer = (*TxIndex)(nil)

View File

@@ -1,4 +1,4 @@
FROM golang:1.9.4
FROM golang:1.10
# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list
@@ -29,7 +29,6 @@ RUN bash scripts/install_abci_apps.sh
# NOTE: this will overwrite whatever is in vendor/
COPY . $REPO
RUN go install ./cmd/tendermint
# expose the volume for debugging

View File

@@ -6,7 +6,7 @@ import (
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)
const defaultCapacity = 1000
@@ -67,7 +67,7 @@ func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error
func (b *EventBus) Publish(eventType string, eventData TMEventData) error {
// no explicit deadline for publishing events
ctx := context.Background()
b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]interface{}{EventTypeKey: eventType}))
b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]string{EventTypeKey: eventType}))
return nil
}
@@ -92,7 +92,7 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
// no explicit deadline for publishing events
ctx := context.Background()
tags := make(map[string]interface{})
tags := make(map[string]string)
// validate and fill tags from tx result
for _, tag := range event.Result.Tags {
@@ -112,7 +112,7 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash())
logIfTagExists(TxHeightKey, tags, b.Logger)
tags[TxHeightKey] = event.Height
tags[TxHeightKey] = fmt.Sprintf("%d", event.Height)
b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags))
return nil
@@ -160,7 +160,7 @@ func (b *EventBus) PublishEventLock(event EventDataRoundState) error {
return b.Publish(EventLock, event)
}
func logIfTagExists(tag string, tags map[string]interface{}, logger log.Logger) {
func logIfTagExists(tag string, tags map[string]string, logger log.Logger) {
if value, ok := tags[tag]; ok {
logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value)
}

View File

@@ -12,8 +12,8 @@ import (
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/tmlibs/common"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmquery "github.com/tendermint/tmlibs/pubsub/query"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
)
func TestEventBusPublishEventTx(t *testing.T) {
@@ -23,12 +23,12 @@ func TestEventBusPublishEventTx(t *testing.T) {
defer eventBus.Stop()
tx := Tx("foo")
result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{[]byte("baz"), []byte("1")}}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
txEventsCh := make(chan interface{})
// PublishEventTx adds all these 3 tags, so the query below should work
query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X'", tx.Hash())
query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND baz=1", tx.Hash())
err = eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query), txEventsCh)
require.NoError(t, err)

View File

@@ -3,9 +3,9 @@ package types
import (
"fmt"
"github.com/tendermint/go-amino"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmquery "github.com/tendermint/tmlibs/pubsub/query"
amino "github.com/tendermint/go-amino"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
)
// Reserved event types

View File

@@ -9,6 +9,11 @@ import (
"github.com/tendermint/tmlibs/merkle"
)
const (
// Evidence type for duplicate vote
DUPLICATE_VOTE = "DUPLICATE_VOTE"
)
// ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid.
type ErrEvidenceInvalid struct {
Evidence Evidence
@@ -35,7 +40,7 @@ type Evidence interface {
Verify(chainID string) error // verify the evidence
Equal(Evidence) bool // check equality of evidence
String() string
String() string // used as type in abci.Evidence
}
func RegisterEvidences(cdc *amino.Codec) {
@@ -54,8 +59,7 @@ type DuplicateVoteEvidence struct {
// String returns a string representation of the evidence.
func (dve *DuplicateVoteEvidence) String() string {
return fmt.Sprintf("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB)
return DUPLICATE_VOTE
}
// Height returns the height this evidence refers to.

View File

@@ -3,7 +3,7 @@ package types
import (
"context"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)
type NopEventBus struct{}

View File

@@ -59,13 +59,14 @@ func (valSet *ValidatorSet) IncrementAccum(times int) {
// Decrement the validator with most accum times times
for i := 0; i < times; i++ {
mostest := validatorsHeap.Peek().(*Validator)
if i == times-1 {
valSet.Proposer = mostest
}
// mind underflow
mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower())
validatorsHeap.Update(mostest, accumComparable{mostest})
if i == times-1 {
valSet.Proposer = mostest
} else {
validatorsHeap.Update(mostest, accumComparable{mostest})
}
}
}

View File

@@ -4,13 +4,13 @@ package version
const (
Maj = "0"
Min = "19"
Fix = "5"
Fix = "6"
)
var (
// Version is the current version of Tendermint
// Must be a string because scripts like dist.sh read this file.
Version = "0.19.5"
Version = "0.19.6-dev"
// GitCommit is the current HEAD set using ldflags.
GitCommit string