Mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-24 16:51:58 +00:00)

Compare commits: v0.19.6-rc ... v0.20.0-rc (59 commits)
SHA1
cb35c6378f
916b120364
b043888ad4
7c9fbadd97
f34d1009c4
0e3dc32b3d
d292fa4541
4e51e4775c
8a49364b5e
3255c076e5
85bd74bb7a
1a6040b99b
174d0cab7d
685366525a
bc3abcdf91
cebf710c5a
46de7a107b
2a3e466b0c
8c2085430d
adf74a6d87
cdd01cd375
178e357d7f
683b527534
d584e03427
d454b1b25f
432f21452d
f0ce8b3883
3da5198631
252a0a392b
f7106bfb39
2a517ac98c
b542dce2e1
82ded582f2
e0d4fe2dba
83c6f2864d
33ec8cb609
43a652bcbf
094a40084d
eec9f142b5
5796e879b9
846ca5ce56
fd6021876b
2763c8539c
1c0bfe1158
1e87ef7f75
c8be091d4a
ec34c8f9d2
6004587347
f55725ebfa
7f20eb5f8e
eeabb4c06b
4da81aa0b7
67068a34f2
2a0e9f93ce
708f35e5c1
f3f5c7f472
68f6226bea
118b86b1ef
b9afcbe3a2
@@ -77,6 +77,22 @@ jobs:
      paths:
        - "bin/abci*"

  build_slate:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: slate docs
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make build-slate

  lint:
    <<: *defaults
    steps:
@@ -117,18 +133,21 @@ jobs:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run: mkdir -p /tmp/logs
      - run:
          name: Run tests
          command: |
            for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
              id=$(basename "$pkg")

              go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg"
              GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
            done
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - "profiles/*"
      - store_artifacts:
          path: /tmp/logs

  test_persistence:
    <<: *defaults
@@ -180,6 +199,9 @@ workflows:
  test-suite:
    jobs:
      - setup_dependencies
      - build_slate:
          requires:
            - setup_dependencies
      - setup_abci:
          requires:
            - setup_dependencies
27 CHANGELOG.md
@@ -1,13 +1,36 @@
# Changelog

## 0.19.6
- [p2p] remove `auth_enc` config option, peer connections are always auth
encrypted

*TBD*
## 0.19.7

BREAKING:

- [libs/pubsub] TagMap#Get returns a string value
- [libs/pubsub] NewTagMap accepts a map of strings

FEATURES

- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
  - true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub

IMPROVEMENTS:

- [consensus] consensus reactor now receives events from a separate event bus,
  which is not dependant on external RPC load
- [consensus/wal] do not look for height in older files if we've seen height - 1
- [docs] Various cleanup and link fixes

## 0.19.6

*May 29th, 2018*

BUG FIXES

- [blockchain] Fix fast-sync deadlock during high peer turnover

## 0.19.5
42 Gopkg.lock (generated)
@@ -5,7 +5,7 @@
  branch = "master"
  name = "github.com/btcsuite/btcd"
  packages = ["btcec"]
  revision = "675abc5df3c5531bc741b56a765e35623459da6d"
  revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64"

[[projects]]
  name = "github.com/davecgh/go-spew"
@@ -82,7 +82,7 @@
  branch = "master"
  name = "github.com/golang/snappy"
  packages = ["."]
  revision = "553a641470496b2327abcac10b36396bd98e45c9"
  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"

[[projects]]
  name = "github.com/gorilla/websocket"
@@ -128,14 +128,14 @@
[[projects]]
  name = "github.com/magiconair/properties"
  packages = ["."]
  revision = "c3beff4c2358b44d0493c7dda585e7db7ff28ae6"
  version = "v1.7.6"
  revision = "c2353362d570a7bfa228149c62842019201cfb71"
  version = "v1.8.0"

[[projects]]
  branch = "master"
  name = "github.com/mitchellh/mapstructure"
  packages = ["."]
  revision = "00c29f56e2386353d58c599509e8dc3801b0d716"
  revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"

[[projects]]
  name = "github.com/pelletier/go-toml"
@@ -159,7 +159,7 @@
  branch = "master"
  name = "github.com/rcrowley/go-metrics"
  packages = ["."]
  revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

[[projects]]
  name = "github.com/spf13/afero"
@@ -179,8 +179,8 @@
[[projects]]
  name = "github.com/spf13/cobra"
  packages = ["."]
  revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
  version = "v0.0.2"
  revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
  version = "v0.0.3"

[[projects]]
  branch = "master"
@@ -226,7 +226,7 @@
    "leveldb/table",
    "leveldb/util"
  ]
  revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"
  revision = "5d6fca44a948d2be89a9702de7717f0168403d3d"

[[projects]]
  name = "github.com/tendermint/abci"
@@ -238,8 +238,8 @@
    "server",
    "types"
  ]
  revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
  version = "v0.10.3"
  revision = "9af8b7a7c87478869f8c280ed9539470b8f470b4"
  version = "v0.11.0-rc4"

[[projects]]
  branch = "master"
@@ -263,12 +263,6 @@
  revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
  version = "v0.6.2"

[[projects]]
  name = "github.com/tendermint/go-wire"
  packages = ["."]
  revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
  version = "v0.7.3"

[[projects]]
  name = "github.com/tendermint/tmlibs"
  packages = [
@@ -283,8 +277,8 @@
    "merkle",
    "test"
  ]
  revision = "cc5f287c4798ffe88c04d02df219ecb6932080fd"
  version = "v0.8.3-rc0"
  revision = "d970af87248a4e162590300dbb74e102183a417d"
  version = "v0.8.3"

[[projects]]
  branch = "master"
@@ -299,7 +293,7 @@
    "ripemd160",
    "salsa20/salsa"
  ]
  revision = "b0697eccbea9adec5b7ba8008f4c33d98d733388"
  revision = "5ba7f63082460102a45837dbd1827e10f9479ac0"

[[projects]]
  branch = "master"
@@ -311,16 +305,15 @@
    "http2/hpack",
    "idna",
    "internal/timeseries",
    "lex/httplex",
    "trace"
  ]
  revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
  revision = "1e491301e022f8f977054da4c2d852decd59571f"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "bb9c189858d91f42db229b04d45a4c3d23a7662a"
  revision = "c11f84a56e43e20a78cee75a7c034031ecf57d1f"

[[projects]]
  name = "golang.org/x/text"
@@ -344,7 +337,6 @@
  version = "v0.3.0"

[[projects]]
  branch = "master"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
@@ -382,6 +374,6 @@
[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "d85c98dcac32cc1fe05d006aa75e8985f6447a150a041b972a673a65e7681da9"
  inputs-digest = "8df4c3cc6f6e61bbe6971c4593d838ec36103667376efd7799b6edf4021adf31"
  solver-name = "gps-cdcl"
  solver-version = 1
10 Gopkg.toml
@@ -71,7 +71,7 @@

[[constraint]]
  name = "github.com/tendermint/abci"
  version = "~0.10.3"
  version = "~0.11.0-rc4"

[[constraint]]
  name = "github.com/tendermint/go-crypto"
@@ -79,7 +79,7 @@

[[constraint]]
  name = "github.com/tendermint/go-amino"
  version = "0.9.9"
  version = "=0.9.9"

[[override]]
  name = "github.com/tendermint/tmlibs"
@@ -89,6 +89,12 @@
  name = "google.golang.org/grpc"
  version = "~1.7.3"

# this got updated and broke, so locked to an old working commit ...
[[override]]
  name = "google.golang.org/genproto"
  revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"

[prune]
  go-tests = true
  unused-packages = true
7 Makefile
@@ -226,8 +226,11 @@ sentry-stop:
	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"

# meant for the CI, inspect script & adapt accordingly
build-slate:
	bash scripts/slate.sh

# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate
@@ -5,6 +5,7 @@ import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"

	cmn "github.com/tendermint/tmlibs/common"
@@ -66,11 +67,13 @@ type BlockPool struct {
	// block requests
	requesters map[int64]*bpRequester
	height     int64 // the lowest key in requesters.
	numPending int32 // number of requests pending assignment or block response
	// peers
	peers         map[p2p.ID]*bpPeer
	maxPeerHeight int64

	// atomic
	numPending int32 // number of requests pending assignment or block response

	requestsCh chan<- BlockRequest
	errorsCh   chan<- peerError
}
@@ -151,7 +154,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

	return pool.height, pool.numPending, len(pool.requesters)
	return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}

// TODO: relax conditions, prevent abuse.
@@ -245,7 +248,7 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
	}

	if requester.setBlock(block, peerID) {
		pool.numPending--
		atomic.AddInt32(&pool.numPending, -1)
		peer := pool.peers[peerID]
		if peer != nil {
			peer.decrPending(blockSize)
@@ -291,10 +294,7 @@ func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
func (pool *BlockPool) removePeer(peerID p2p.ID) {
	for _, requester := range pool.requesters {
		if requester.getPeerID() == peerID {
			if requester.getBlock() != nil {
				pool.numPending++
			}
			go requester.redo() // pick another peer and ...
			requester.redo()
		}
	}
	delete(pool.peers, peerID)
@@ -332,7 +332,7 @@ func (pool *BlockPool) makeNextRequester() {
	// request.SetLogger(pool.Logger.With("height", nextHeight))

	pool.requesters[nextHeight] = request
	pool.numPending++
	atomic.AddInt32(&pool.numPending, 1)

	err := request.Start()
	if err != nil {
@@ -360,7 +360,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {

// unused by tendermint; left for debugging purposes
func (pool *BlockPool) debug() string {
	pool.mtx.Lock() // Lock
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

	str := ""
@@ -466,8 +466,8 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
	bpr := &bpRequester{
		pool:       pool,
		height:     height,
		gotBlockCh: make(chan struct{}),
		redoCh:     make(chan struct{}),
		gotBlockCh: make(chan struct{}, 1),
		redoCh:     make(chan struct{}, 1),

		peerID: "",
		block:  nil,
@@ -481,7 +481,7 @@ func (bpr *bpRequester) OnStart() error {
	return nil
}

// Returns true if the peer matches
// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
	bpr.mtx.Lock()
	if bpr.block != nil || bpr.peerID != peerID {
@@ -491,7 +491,10 @@ func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
	bpr.block = block
	bpr.mtx.Unlock()

	bpr.gotBlockCh <- struct{}{}
	select {
	case bpr.gotBlockCh <- struct{}{}:
	default:
	}
	return true
}

@@ -507,17 +510,27 @@ func (bpr *bpRequester) getPeerID() p2p.ID {
	return bpr.peerID
}

// This is called from the requestRoutine, upon redo().
func (bpr *bpRequester) reset() {
	bpr.mtx.Lock()
	defer bpr.mtx.Unlock()

	if bpr.block != nil {
		atomic.AddInt32(&bpr.pool.numPending, 1)
	}

	bpr.peerID = ""
	bpr.block = nil
	bpr.mtx.Unlock()
}

// Tells bpRequester to pick another peer and try again.
// NOTE: blocking
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo() {
	bpr.redoCh <- struct{}{}
	select {
	case bpr.redoCh <- struct{}{}:
	default:
	}
}

// Responsible for making more requests as necessary
@@ -546,17 +559,8 @@ OUTER_LOOP:

	// Send request and wait.
	bpr.pool.sendRequest(bpr.height, peer.id)
	select {
	case <-bpr.pool.Quit():
		bpr.Stop()
		return
	case <-bpr.Quit():
		return
	case <-bpr.redoCh:
		bpr.reset()
		continue OUTER_LOOP // When peer is removed
	case <-bpr.gotBlockCh:
		// We got the block, now see if it's good.
	WAIT_LOOP:
		for {
			select {
			case <-bpr.pool.Quit():
				bpr.Stop()
@@ -566,6 +570,10 @@ OUTER_LOOP:
			case <-bpr.redoCh:
				bpr.reset()
				continue OUTER_LOOP
			case <-bpr.gotBlockCh:
				// We got a block!
				// Continue the for-loop and wait til Quit.
				continue WAIT_LOOP
			}
		}
	}
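The pool changes above swap a mutex-guarded counter for `sync/atomic` operations and make the requester notifications non-blocking by giving each signalling channel a buffer of one and sending with a `select`/`default`. A minimal, self-contained sketch of that pattern (illustrative only, not code from this repository):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type requester struct {
	redoCh chan struct{} // buffered with capacity 1
}

// redo is non-blocking: if a redo is already pending, the extra signal is dropped.
func (r *requester) redo() {
	select {
	case r.redoCh <- struct{}{}:
	default:
	}
}

func main() {
	var numPending int32

	// Counter updated without holding any pool-wide mutex.
	atomic.AddInt32(&numPending, 1)
	atomic.AddInt32(&numPending, -1)
	fmt.Println("pending:", atomic.LoadInt32(&numPending))

	r := &requester{redoCh: make(chan struct{}, 1)}
	r.redo()
	r.redo() // second call is a no-op instead of blocking the caller
	fmt.Println("queued redo signals:", len(r.redoCh))
}
```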
@@ -287,11 +287,11 @@ type P2PConfig struct {
	// Does not work if the peer-exchange reactor is disabled.
	SeedMode bool `mapstructure:"seed_mode"`

	// Authenticated encryption
	AuthEnc bool `mapstructure:"auth_enc"`

	// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
	PrivatePeerIDs string `mapstructure:"private_peer_ids"`

	// Toggle to disable guard against peers connecting from the same ip.
	AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`
}

// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
@@ -307,7 +307,7 @@ func DefaultP2PConfig() *P2PConfig {
		RecvRate:   512000, // 500 kB/s
		PexReactor: true,
		SeedMode:   false,
		AuthEnc:    true,
		AllowDuplicateIP: true, // so non-breaking yet
	}
}

@@ -317,6 +317,7 @@ func TestP2PConfig() *P2PConfig {
	cfg.ListenAddress = "tcp://0.0.0.0:36656"
	cfg.SkipUPNP = true
	cfg.FlushThrottleTimeout = 10
	cfg.AllowDuplicateIP = true
	return cfg
}
@@ -165,9 +165,6 @@ pex = {{ .P2P.PexReactor }}
# Does not work if the peer-exchange reactor is disabled.
seed_mode = {{ .P2P.SeedMode }}

# Authenticated encryption
auth_enc = {{ .P2P.AuthEnc }}

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
@@ -58,14 +58,11 @@ func TestByzantine(t *testing.T) {
		css[i].doPrevote = func(height int64, round int) {}
	}

	eventBus := types.NewEventBus()
	eventBus := css[i].eventBus
	eventBus.SetLogger(logger.With("module", "events", "validator", i))
	err := eventBus.Start()
	require.NoError(t, err)
	defer eventBus.Stop()

	eventChans[i] = make(chan interface{}, 1)
	err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
	err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
	require.NoError(t, err)

	conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
@@ -254,7 +254,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

	val1PubKey := css[0].privValidator.GetPubKey()
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
	val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey)
	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
@@ -266,7 +267,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
@@ -278,7 +279,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
	}

	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 26)
	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
@@ -316,7 +317,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
	logger.Info("---------------------------- Testing adding one validator")

	newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
	newValidatorTx1 := kvstore.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower)
	valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

	// wait till everyone makes block 2
	// ensure the commit includes all validators
@@ -342,7 +344,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
	logger.Info("---------------------------- Testing changing the voting power of one validator")

	updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
	updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
@@ -358,10 +361,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
	logger.Info("---------------------------- Testing adding two validators at once")

	newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower)
	newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

	newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower)
	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
@@ -373,8 +378,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
	//---------------------------------------------------------------------------
	logger.Info("---------------------------- Testing removing two validators at once")

	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
@@ -2,7 +2,6 @@ package consensus

import (
	"bytes"
	"encoding/json"
	"fmt"
	"hash/crc32"
	"io"
@@ -197,20 +196,20 @@ type Handshaker struct {
	stateDB      dbm.DB
	initialState sm.State
	store        types.BlockStore
	appState     json.RawMessage
	genDoc       *types.GenesisDoc
	logger       log.Logger

	nBlocks int // number of blocks applied to the state
}

func NewHandshaker(stateDB dbm.DB, state sm.State,
	store types.BlockStore, appState json.RawMessage) *Handshaker {
	store types.BlockStore, genDoc *types.GenesisDoc) *Handshaker {

	return &Handshaker{
		stateDB:      stateDB,
		initialState: state,
		store:        store,
		appState:     appState,
		genDoc:       genDoc,
		logger:       log.NewNopLogger(),
		nBlocks:      0,
	}
@@ -269,8 +268,11 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
	if appBlockHeight == 0 {
		validators := types.TM2PB.Validators(state.Validators)
		req := abci.RequestInitChain{
			Validators:    validators,
			AppStateBytes: h.appState,
			Time:            h.genDoc.GenesisTime.Unix(), // TODO
			ChainId:         h.genDoc.ChainID,
			ConsensusParams: types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams),
			Validators:      validators,
			AppStateBytes:   h.genDoc.AppStateJSON,
		}
		_, err := proxyApp.Consensus().InitChainSync(req)
		if err != nil {
@@ -365,7 +367,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
	for i := appBlockHeight + 1; i <= finalBlock; i++ {
		h.logger.Info("Applying block", "height", i)
		block := h.store.LoadBlock(i)
		appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger)
		appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB)
		if err != nil {
			return nil, err
		}
@@ -299,7 +299,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
	// Create proxyAppConn connection (consensus, mempool, query)
	clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
	proxyApp := proxy.NewAppConns(clientCreator,
		NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
		NewHandshaker(stateDB, state, blockStore, gdoc))
	err = proxyApp.Start()
	if err != nil {
		cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
@@ -366,7 +366,8 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
	}

	// now start the app using the handshake - it should sync
	handshaker := NewHandshaker(stateDB, state, store, nil)
	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
	handshaker := NewHandshaker(stateDB, state, store, genDoc)
	proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
	if err := proxyApp.Start(); err != nil {
		t.Fatalf("Error starting proxy app connections: %v", err)
@@ -416,10 +417,10 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
	}
	defer proxyApp.Stop()

	// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
	var genesisBytes []byte
	validators := types.TM2PB.Validators(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}

@@ -453,10 +454,10 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
	}
	defer proxyApp.Stop()

	// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
	var genesisBytes []byte
	validators := types.TM2PB.Validators(state.Validators)
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
		Validators: validators,
	}); err != nil {
		panic(err)
	}
@@ -111,7 +111,7 @@ func (wal *baseWAL) OnStop() {
}

// Write is called in newStep and for each receive on the
// peerMsgQueue and the timoutTicker.
// peerMsgQueue and the timeoutTicker.
// NOTE: does not call fsync()
func (wal *baseWAL) Write(msg WALMessage) {
	if wal == nil {
@@ -144,13 +144,14 @@ type WALSearchOptions struct {
	IgnoreDataCorruptionErrors bool
}

// SearchForEndHeight searches for the EndHeightMessage with the height and
// returns an auto.GroupReader, whenever it was found or not and an error.
// SearchForEndHeight searches for the EndHeightMessage with the given height
// and returns an auto.GroupReader, whenever it was found or not and an error.
// Group reader will be nil if found equals false.
//
// CONTRACT: caller must close group reader.
func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
	var msg *TimedWALMessage
	lastHeightFound := int64(-1)

	// NOTE: starting from the last file in the group because we're usually
	// searching for the last height. See replay.go
@@ -166,17 +167,25 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
		for {
			msg, err = dec.Decode()
			if err == io.EOF {
				// OPTIMISATION: no need to look for height in older files if we've seen h < height
				if lastHeightFound > 0 && lastHeightFound < height {
					gr.Close()
					return nil, false, nil
				}
				// check next file
				break
			}
			if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
				wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
				// do nothing
				continue
			} else if err != nil {
				gr.Close()
				return nil, false, err
			}

			if m, ok := msg.Msg.(EndHeightMessage); ok {
				lastHeightFound = m.Height
				if m.Height == height { // found
					wal.Logger.Debug("Found", "height", height, "index", index)
					return gr, true, nil
@@ -271,23 +280,17 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {

	b = make([]byte, 4)
	_, err = dec.rd.Read(b)
	if err == io.EOF {
		return nil, err
	}
	if err != nil {
		return nil, fmt.Errorf("failed to read length: %v", err)
	}
	length := binary.BigEndian.Uint32(b)

	if length > maxMsgSizeBytes {
		return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
		return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
	}

	data := make([]byte, length)
	_, err = dec.rd.Read(data)
	if err == io.EOF {
		return nil, err
	}
	if err != nil {
		return nil, fmt.Errorf("failed to read data: %v", err)
	}
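The WAL decoder above reads a 4-byte big-endian length prefix and rejects any record whose declared length exceeds `maxMsgSizeBytes`. A rough standalone sketch of that framing check (the 1 MB limit here is an assumption for illustration, not the repository's constant):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const maxMsgSizeBytes = 1024 * 1024 // assumed limit for this example

// readRecord reads one length-prefixed record, refusing oversized lengths.
func readRecord(r io.Reader) ([]byte, error) {
	var lenBuf [4]byte
	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
		return nil, fmt.Errorf("failed to read length: %v", err)
	}
	length := binary.BigEndian.Uint32(lenBuf[:])
	if length > maxMsgSizeBytes {
		return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
	}
	data := make([]byte, length)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, fmt.Errorf("failed to read data: %v", err)
	}
	return data, nil
}

func main() {
	var buf bytes.Buffer
	payload := []byte("hello wal")
	binary.Write(&buf, binary.BigEndian, uint32(len(payload))) // length prefix
	buf.Write(payload)

	rec, err := readRecord(&buf)
	fmt.Println(string(rec), err)
}
```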
@@ -52,7 +52,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
		return nil, errors.Wrap(err, "failed to make genesis state")
	}
	blockStore := bc.NewBlockStore(blockStoreDB)
	handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
	handshaker := NewHandshaker(stateDB, state, blockStore, genDoc)
	proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
@@ -183,6 +183,7 @@ Try running these commands:

    > commit
    -> code: OK
    -> data.hex: 0x0000000000000000

    > deliver_tx "abc"
    -> code: OK
@@ -194,7 +195,7 @@ Try running these commands:

    > commit
    -> code: OK
    -> data.hex: 0x49DFD15CCDACDEAE9728CB01FBB5E8688CA58B91
    -> data.hex: 0x0200000000000000

    > query "abc"
    -> code: OK
@@ -208,7 +209,7 @@ Try running these commands:

    > commit
    -> code: OK
    -> data.hex: 0x70102DB32280373FBF3F9F89DA2A20CE2CD62B0B
    -> data.hex: 0x0400000000000000

    > query "def"
    -> code: OK
@@ -301,6 +302,7 @@ In another window, start the ``abci-cli console``:

    > set_option serial on
    -> code: OK
    -> log: OK (SetOption doesn't return anything.)

    > check_tx 0x00
    -> code: OK
@@ -1,133 +1,42 @@
Application Architecture Guide
==============================

Overview
--------
Here we provide a brief guide on the recommended architecture of a Tendermint blockchain
application.

A blockchain application is more than the consensus engine and the
transaction logic (eg. smart contracts, business logic) as implemented
in the ABCI app. There are also (mobile, web, desktop) clients that will
need to connect and make use of the app. We will assume for now that you
have a well designed transactions and database model, but maybe this
will be the topic of another article. This article is more interested in
various ways of setting up the "plumbing" and connecting these pieces,
and demonstrating some evolving best practices.
The following diagram provides a superb example:

Security
--------
https://drive.google.com/open?id=1yR2XpRi9YCY9H9uMfcw8-RMJpvDyvjz9

A very important aspect when constructing a blockchain is security. The
consensus model can be DoSed (no consensus possible) by corrupting 1/3
of the validators and exploited (writing arbitrary blocks) by corrupting
2/3 of the validators. So, while the security is not that of the
"weakest link", you should take care that the "average link" is
sufficiently hardened.
The end-user application here is the Cosmos Voyager, at the bottom left.
Voyager communicates with a REST API exposed by a local Light-Client Daemon.
The Light-Client Daemon is an application specific program that communicates with
Tendermint nodes and verifies Tendermint light-client proofs through the Tendermint Core RPC.
The Tendermint Core process communicates with a local ABCI application, where the
user query or transaction is actually processed.

One big attack surface on the validators is the communication between
the ABCI app and the tendermint core. This should be highly protected.
Ideally, the app and the core are running on the same machine, so no
external agent can target the communication channel. You can use unix
sockets (with permissions preventing access from other users), or even
compile the two apps into one binary if the ABCI app is also writen in
go. If you are unable to do that due to language support, then the ABCI
app should bind a TCP connection to localhost (127.0.0.1), which is less
efficient and secure, but still not reachable from outside. If you must
run the ABCI app and tendermint core on separate machines, make sure you
have a secure communication channel (ssh tunnel?)
The ABCI application must be a deterministic result of the Tendermint consensus - any external influence
on the application state that didn't come through Tendermint could cause a
consensus failure. Thus *nothing* should communicate with the application except Tendermint via ABCI.

Now assuming, you have linked together your app and the core securely,
you must also make sure no one can get on the machine it is hosted on.
At this point it is basic network security. Run on a secure operating
system (SELinux?). Limit who has access to the machine (user accounts,
but also where the physical machine is hosted). Turn off all services
except for ssh, which should only be accessible by some well-guarded
public/private key pairs (no password). And maybe even firewall off
access to the ports used by the validators, so only known validators can
connect.
If the application is written in Go, it can be compiled into the Tendermint binary.
Otherwise, it should use a unix socket to communicate with Tendermint.
If it's necessary to use TCP, extra care must be taken to encrypt and authenticate the connection.

There was also a suggestion on slack from @jhon about compiling
everything together with a unikernel for more security, such as
`Mirage <https://mirage.io>`__ or
`UNIK <https://github.com/emc-advanced-dev/unik>`__.
All reads from the app happen through the Tendermint `/abci_query` endpoint.
All writes to the app happen through the Tendermint `/broadcast_tx_*` endpoints.

Connecting your client to the blockchain
----------------------------------------
The Light-Client Daemon is what provides light clients (end users) with nearly all the security of a full node.
It formats and broadcasts transactions, and verifies proofs of queries and transaction results.
Note that it need not be a daemon - the Light-Client logic could instead be implemented in the same process as the end-user application.

Tendermint Core RPC
~~~~~~~~~~~~~~~~~~~
Note for those ABCI applications with weaker security requirements, the functionality of the Light-Client Daemon can be moved
into the ABCI application process itself. That said, exposing the application process to anything besides Tendermint over ABCI
requires extreme caution, as all transactions, and possibly all queries, should still pass through Tendermint.

The concept is that the ABCI app is completely hidden from the outside
world and only communicated through a tested and secured `interface
exposed by the tendermint core <./specification/rpc.html>`__. This interface
exposes a lot of data on the block header and consensus process, which
is quite useful for externally verifying the system. It also includes
3(!) methods to broadcast a transaction (propose it for the blockchain,
and possibly await a response). And one method to query app-specific
data from the ABCI application.

Pros:

- Server code already written
- Access to block headers to validate merkle proofs (nice for light clients)
- Basic read/write functionality is supported

Cons:

- Limited interface to app. All queries must be serialized into []byte (less expressive than JSON over HTTP) and there is no way to push data from ABCI app to the client (eg. notify me if account X receives a transaction)

Custom ABCI server
~~~~~~~~~~~~~~~~~~

This was proposed by @wolfposd on slack and demonstrated by
`TMChat <https://github.com/wolfposd/TMChat>`__, a sample app. The
concept is to write a custom server for your app (with typical REST
API/websockets/etc for easy use by a mobile app). This custom server is
in the same binary as the ABCI app and data store, so can easily react
to complex events there that involve understanding the data format (send
a message if my balance drops below 500). All "writes" sent to this
server are proxied via websocket/JSON-RPC to tendermint core. When they
come back as deliver\_tx over ABCI, they will be written to the data
store. For "reads", we can do any queries we wish that are supported by
our architecture, using any web technology that is useful. The general
architecture is shown in the following diagram:

.. figure:: assets/tm-application-example.png

Pros:

- Separates application logic from blockchain logic
- Allows much richer, more flexible client-facing API
- Allows pub-sub, watching certain fields, etc.

Cons:

- Access to ABCI app can be dangerous (be VERY careful not to write unless it comes from the validator node)
- No direct access to the blockchain headers to verify tx
- You must write your own API (but maybe that's a pro...)

Hybrid solutions
~~~~~~~~~~~~~~~~

Likely the least secure but most versatile. The client can access both
the tendermint node for all blockchain info, as well as a custom app
server, for complex queries and pub-sub on the abci app.

Pros:

- All from both above solutions

Cons:

- Even more complexity; even more attack vectors (less
  security)

Scalability
-----------

Read replica using non-validating nodes? They could forward transactions
to the validators (fewer connections, more security), and locally allow
all queries in any of the above configurations. Thus, while
transaction-processing speed is limited by the speed of the abci app and
the number of validators, one should be able to scale our read
performance to quite an extent (until the replication process drains too
many resources from the validator nodes).
See the following for more extensive documentation:
- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
- [Tendermint RPC Docs](https://tendermint.github.io/slate/)
- [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618)
- [Tendermint Basics](https://tendermint.readthedocs.io/en/master/using-tendermint.html)
- [ABCI spec](https://github.com/tendermint/abci/blob/master/specification.rst)
@@ -103,9 +103,6 @@ pex = true
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false

# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
@@ -103,9 +103,6 @@ pex = true
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false

# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
@@ -103,9 +103,6 @@ pex = true
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false

# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
@@ -103,9 +103,6 @@ pex = true
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false

# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
@@ -55,7 +55,10 @@ Tendermint 102
   abci-spec.rst
   app-architecture.rst
   app-development.rst
   subscribing-to-events-via-websocket.rst
   indexing-transactions.rst
   how-to-read-logs.rst
   running-in-production.rst

Tendermint 201
--------------
100 docs/indexing-transactions.rst (new file)
@@ -0,0 +1,100 @@
Indexing Transactions
=====================

Tendermint allows you to index transactions and later query or subscribe to
their results.

Let's take a look at the ``[tx_index]`` config section:

::

    ##### transactions indexer configuration options #####
    [tx_index]

    # What indexer to use for transactions
    #
    # Options:
    #   1) "null" (default)
    #   2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
    indexer = "kv"

    # Comma-separated list of tags to index (by default the only tag is tx hash)
    #
    # It's recommended to index only a subset of tags due to possible memory
    # bloat. This, of course, depends on the indexer's DB and the volume of
    # transactions.
    index_tags = ""

    # When set to true, tells indexer to index all tags. Note this may not be
    # desirable (see the comment above). IndexTags has a precedence over
    # IndexAllTags (i.e. when given both, IndexTags will be indexed).
    index_all_tags = false

By default, Tendermint will index all transactions by their respective hashes
using an embedded simple indexer. Note, we are planning to add more options in
the future (e.g., Postgresql indexer).

Adding tags
-----------

In your application's ``DeliverTx`` method, add the ``Tags`` field with the
pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance":
"100.0", "date": "2018-01-02").

Example:

::

    func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
        ...
        tags := []cmn.KVPair{
            {[]byte("account.name"), []byte("igor")},
            {[]byte("account.address"), []byte("0xdeadbeef")},
            {[]byte("tx.amount"), []byte("7")},
        }
        return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
    }

If you want Tendermint to only index transactions by the "account.name" tag, in the
config set ``tx_index.index_tags="account.name"``. If you want to index all tags,
set ``index_all_tags=true``.

Note, there are a few predefined tags:

- ``tm.event`` (event type)
- ``tx.hash`` (transaction's hash)
- ``tx.height`` (height of the block transaction was committed in)

Tendermint will throw a warning if you try to use any of the above keys.

Querying transactions
---------------------

You can query the transaction results by calling the ``/tx_search`` RPC endpoint:

::

    curl "localhost:46657/tx_search?query=\"account.name='igor'\"&prove=true"

Check out `API docs <https://tendermint.github.io/slate/?shell#txsearch>`__ for more
information on query syntax and other options.

Subscribing to transactions
---------------------------

Clients can subscribe to transactions with the given tags via Websocket by
providing a query to the ``/subscribe`` RPC endpoint.

::

    {
        "jsonrpc": "2.0",
        "method": "subscribe",
        "id": "0",
        "params": {
            "query": "account.name='igor'"
        }
    }

Check out `API docs <https://tendermint.github.io/slate/#subscribe>`__ for more
information on query syntax and other options.
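The ``curl`` example above can just as well be issued from code. A small hedged Go sketch that hits the same ``/tx_search`` endpoint over HTTP and prints the raw JSON-RPC response (address and port follow the documentation above; error handling kept minimal):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Same query as the curl example: transactions tagged account.name='igor'.
	query := url.QueryEscape(`"account.name='igor'"`)
	resp, err := http.Get("http://localhost:46657/tx_search?query=" + query + "&prove=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The response is JSON-RPC; inspect result.txs for matching transactions.
	fmt.Println(string(body))
}
```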
203 docs/running-in-production.rst (new file)
@@ -0,0 +1,203 @@
Running in production
=====================

Logging
-------

Default logging level (``main:info,state:info,*:``) should suffice for normal
operation mode. Read `this post
<https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756>`__
for details on how to configure the ``log_level`` config variable. Some of the
modules can be found `here <./how-to-read-logs.html#list-of-modules>`__. If
you're trying to debug Tendermint or asked to provide logs with debug logging
level, you can do so by running tendermint with ``--log_level="*:debug"``.

DOS Exposure and Mitigation
---------------------------

Validators are supposed to set up `Sentry Node Architecture
<https://blog.cosmos.network/tendermint-explained-bringing-bft-based-pos-to-the-public-blockchain-domain-f22e274a0fdb>`__
to prevent Denial-of-service attacks. You can read more about it `here
<https://github.com/tendermint/aib-data/blob/develop/medium/TendermintBFT.md>`__.

P2P
~~~

The core of the Tendermint peer-to-peer system is ``MConnection``. Each
connection has ``MaxPacketMsgPayloadSize``, which is the maximum packet size,
and bounded send & receive queues. One can impose restrictions on send &
receive rate per connection (``SendRate``, ``RecvRate``).

RPC
~~~

Endpoints returning multiple entries are limited by default to return 30
elements (100 max).

Rate-limiting and authentication are other key aspects to help protect
against DOS attacks. While in the future we may implement these features, for
now, validators are supposed to use external tools like `NGINX
<https://www.nginx.com/blog/rate-limiting-nginx/>`__ or `traefik
<https://docs.traefik.io/configuration/commons/#rate-limiting>`__ to achieve
the same things.

Debugging Tendermint
--------------------

If you ever have to debug Tendermint, the first thing you should probably do is
to check out the logs. See `"How to read logs" <./how-to-read-logs.html>`__,
where we explain what certain log statements mean.

If, after skimming through the logs, things are still not clear, the second
step is to query the `/status` RPC endpoint. It provides the necessary info:
whether the node is syncing or not, what height it is on, etc.

```
$ curl http(s)://{ip}:{rpcPort}/status
```

`/dump_consensus_state` will give you a detailed overview of the consensus
state (proposer, latest validators, peers states). From it, you should be able
to figure out why, for example, the network had halted.

```
$ curl http(s)://{ip}:{rpcPort}/dump_consensus_state
```

There is a reduced version of this endpoint - `/consensus_state`, which
returns just the votes seen at the current height.

- `Github Issues <https://github.com/tendermint/tendermint/issues>`__
- `StackOverflow questions <https://stackoverflow.com/questions/tagged/tendermint>`__

Monitoring Tendermint
---------------------

Each Tendermint instance has a standard `/health` RPC endpoint, which responds
with 200 (OK) if everything is fine and 500 (or no response) if something is
wrong.

Other useful endpoints include the previously mentioned `/status`, `/net_info` and
`/validators`.

We have a small tool, called tm-monitor, which outputs information from the
endpoints above plus some statistics. The tool can be found `here
<https://github.com/tendermint/tools/tree/master/tm-monitor>`__.

What happens when my app dies?
------------------------------

You are supposed to run Tendermint under a `process supervisor
<https://en.wikipedia.org/wiki/Process_supervision>`__ (like systemd or runit).
It will ensure Tendermint is always running (despite possible errors).

Getting back to the original question, if your application dies, Tendermint
will panic. After a process supervisor restarts your application, Tendermint
should be able to reconnect successfully. The order of restart does not matter
for it.

Signal handling
---------------

We catch SIGINT and SIGTERM and try to clean up nicely. For other signals we
use the default behaviour in Go: `Default behavior of signals in Go programs
<https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs>`__.

Hardware
--------

Processor and Memory
~~~~~~~~~~~~~~~~~~~~

While actual specs vary depending on the load and validators count, minimal requirements are:

- 1GB RAM
- 25GB of disk space
- 1.4 GHz CPU

SSD disks are preferable for applications with high transaction throughput.

Recommended:

- 2GB RAM
- 100GB SSD
- x64 2.0 GHz 2v CPU

While for now Tendermint stores all the history and it may require significant
disk space over time, we are planning to implement state syncing (See `#828
<https://github.com/tendermint/tendermint/issues/828>`__). So, storing all the
past blocks will not be necessary.

Operating Systems
~~~~~~~~~~~~~~~~~

Tendermint can be compiled for a wide range of operating systems thanks to the Go
language (the list of $OS/$ARCH pairs can be found `here
<https://golang.org/doc/install/source#environment>`__).

While we do not favor any operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like Mac OS).

Misc.
~~~~~

NOTE: if you are going to use Tendermint in a public domain, make sure you read
`hardware recommendations (see "4. Hardware")
<https://cosmos.network/validators>`__ for a validator in the Cosmos network.

Configuration parameters
------------------------

- ``p2p.flush_throttle_timeout``
  ``p2p.max_packet_msg_payload_size``
  ``p2p.send_rate``
  ``p2p.recv_rate``

If you are going to use Tendermint in a private domain and you have a private
high-speed network among your peers, it makes sense to lower the flush throttle
timeout and increase the other params.

::

    [p2p]

    send_rate=20000000 # 2MB/s
    recv_rate=20000000 # 2MB/s
    flush_throttle_timeout=10
    max_packet_msg_payload_size=10240 # 10KB

- ``mempool.recheck``

After every block, Tendermint rechecks every transaction left in the mempool to
see if transactions committed in that block affected the application state, so
some of the transactions left may become invalid. If that does not apply to
your application, you can disable it by setting ``mempool.recheck=false``.

- ``mempool.broadcast``

Setting this to false will stop the mempool from relaying transactions to other
peers until they are included in a block. It means only the peer you send the
tx to will see it until it is included in a block.

- ``consensus.skip_timeout_commit``

We want skip_timeout_commit=false when there is economics on the line because
proposers should wait to hear more votes. But if you don't care about that
and want the fastest consensus, you can skip it. It will be kept false by
default for public deployments (e.g. `Cosmos Hub
<https://cosmos.network/intro/hub>`__) while for enterprise applications,
setting it to true is not a problem.

- ``consensus.peer_gossip_sleep_duration``

You can try to reduce the time your node sleeps before checking if there's something to send its peers.

- ``consensus.timeout_commit``

You can also try lowering ``timeout_commit`` (time we sleep before proposing the next block).

- ``consensus.max_block_size_txs``

By default, the maximum number of transactions per block is 10_000. Feel free
to change it to suit your needs.
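Since every node exposes the standard `/health` RPC endpoint described above, a watchdog can be as simple as an HTTP probe. A minimal sketch (the address and timeout are illustrative assumptions):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// healthy reports whether the node's /health endpoint answered with 200 OK.
func healthy(rpcAddr string) bool {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(rpcAddr + "/health")
	if err != nil {
		return false // no response counts as unhealthy
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func main() {
	fmt.Println("node healthy:", healthy("http://localhost:46657"))
}
```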
33 docs/spec/consensus/wal.md Normal file
@@ -0,0 +1,33 @@

# WAL

The consensus module writes every message to the WAL (write-ahead log).

It also issues an fsync syscall through
[File#Sync](https://golang.org/pkg/os/#File.Sync) for messages signed by this
node (to prevent double signing).

Under the hood, it uses
[autofile.Group](https://godoc.org/github.com/tendermint/tmlibs/autofile#Group),
which rotates files when they get too big (> 10MB).

The total maximum size is 1GB. We only need the latest block and the block before it,
but if the former is dragging on across many rounds, we want all those rounds.

## Replay

The consensus module will replay all the messages of the last height written to the WAL
before a crash (if one occurs).

The private validator may try to sign messages during replay because it runs
somewhat autonomously and does not know about the replay process.

For example, if we got all the way to precommit in the WAL and then crash,
after we replay the proposal message, the private validator will try to sign a
prevote. But it will fail. That's ok because we'll see the prevote later in the
WAL. Then it will go to precommit, and that time it will work because the
private validator contains the `LastSignBytes` and then we'll replay the
precommit from the WAL.

Make sure to read about [WAL
corruption](https://tendermint.readthedocs.io/projects/tools/en/master/specification/corruption.html#wal-corruption)
and recovery strategies.
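To make the replay interaction concrete, here is a minimal, self-contained sketch of the double-signing protection described above. It is illustrative only (not the actual private validator code): the validator caches the last signed bytes, so a message replayed from the WAL either gets the cached signature back or, if it conflicts, is rejected.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// lastSignState is an illustrative stand-in for the private validator's
// persisted state: the last height/round/step plus the exact bytes signed.
type lastSignState struct {
	Height        int64
	Round         int
	Step          int8
	LastSignBytes []byte
	LastSignature []byte
}

var errDoubleSign = errors.New("conflicting message at the same height/round/step: refusing to double sign")

// sign refuses to sign a conflicting message for an already-signed step, and
// during WAL replay simply returns the cached signature for an identical one.
func (s *lastSignState) sign(height int64, round int, step int8, signBytes []byte, signFn func([]byte) []byte) ([]byte, error) {
	sameHRS := s.Height == height && s.Round == round && s.Step == step
	if sameHRS {
		if bytes.Equal(signBytes, s.LastSignBytes) {
			return s.LastSignature, nil // replayed from the WAL: reuse the old signature
		}
		return nil, errDoubleSign
	}
	sig := signFn(signBytes)
	s.Height, s.Round, s.Step = height, round, step
	s.LastSignBytes, s.LastSignature = signBytes, sig
	return sig, nil
}

func main() {
	s := &lastSignState{}
	fakeSign := func(b []byte) []byte { return append([]byte("sig:"), b...) }

	first, _ := s.sign(10, 0, 2, []byte("prevote@10/0"), fakeSign)
	replayed, _ := s.sign(10, 0, 2, []byte("prevote@10/0"), fakeSign) // WAL replay
	fmt.Println(bytes.Equal(first, replayed))                         // true

	_, err := s.sign(10, 0, 2, []byte("another prevote@10/0"), fakeSign)
	fmt.Println(err) // double-sign attempt is rejected
}
```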
@@ -17,9 +17,6 @@ We will attempt to connect to the peer at IP:PORT, and verify,
via authenticated encryption, that it is in possession of the private key
corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer layer.

If `auth_enc = false`, peers can use an arbitrary ID, but they must always use
one. Authentication can then happen out-of-band of Tendermint, for instance via VPN.

## Connections

All p2p connections use TCP.
@@ -47,3 +47,13 @@ type bcStatusResponseMessage struct {

## Protocol

TODO

## Channels

Defines `maxMsgSize` for the maximum size of incoming messages, and
`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sending and
receiving buffers respectively. These are supposed to prevent amplification
attacks by setting an upper limit on how much data we can receive from & send to
a peer.

Sending incorrectly encoded data will result in stopping the peer.
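As a rough illustration of the size limit described above (not the reactor's actual code), an incoming message larger than `maxMsgSize` is simply grounds for stopping the peer:

```go
package main

import (
	"errors"
	"fmt"
)

// maxMsgSize is an illustrative limit, not the reactor's real value.
const maxMsgSize = 1048576

var errMsgTooBig = errors.New("message exceeds maxMsgSize: stop the peer")

// checkMsgSize rejects oversized payloads before decoding them, which bounds
// how much data a single peer can make us buffer.
func checkMsgSize(msg []byte) error {
	if len(msg) > maxMsgSize {
		return errMsgTooBig
	}
	return nil
}

func main() {
	fmt.Println(checkMsgSize(make([]byte, 512)))          // <nil>
	fmt.Println(checkMsgSize(make([]byte, maxMsgSize+1))) // stop the peer
}
```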
@@ -342,3 +342,11 @@ It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state
broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel.
Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
`ProposalHeartbeatMessage` is sent the same way on the StateChannel.

## Channels

Defines 4 channels: state, data, vote and vote_set_bits. Each channel
has `SendQueueCapacity` and `RecvBufferCapacity` and
`RecvMessageCapacity` set to `maxMsgSize`.

Sending incorrectly encoded data will result in stopping the peer.
10 docs/spec/reactors/evidence/reactor.md Normal file
@@ -0,0 +1,10 @@

# Evidence Reactor

## Channels

[#1503](https://github.com/tendermint/tendermint/issues/1503)

Sending invalid evidence will result in stopping the peer.

Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
in stopping the peer.
14 docs/spec/reactors/mempool/reactor.md Normal file
@@ -0,0 +1,14 @@

# Mempool Reactor

## Channels

[#1503](https://github.com/tendermint/tendermint/issues/1503)

The mempool maintains a cache of the last 10000 transactions to prevent
replaying old transactions (plus transactions coming from other
validators, who are continually exchanging transactions). Read [Replay
Protection](https://tendermint.readthedocs.io/projects/tools/en/master/app-development.html?#replay-protection)
for details.

Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
in stopping the peer.
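A minimal sketch of the kind of bounded cache described above (illustrative only; the real cache lives in the `mempool` package): it remembers the hashes of the last N transactions and rejects anything it has already seen.

```go
package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
)

// txCache remembers the hashes of the last maxSize transactions it has seen.
type txCache struct {
	maxSize int
	order   *list.List // FIFO of tx hashes, oldest in front
	seen    map[[sha256.Size]byte]struct{}
}

func newTxCache(maxSize int) *txCache {
	return &txCache{maxSize: maxSize, order: list.New(), seen: make(map[[sha256.Size]byte]struct{})}
}

// Push returns false if the tx was already cached (i.e. it is a replay).
func (c *txCache) Push(tx []byte) bool {
	h := sha256.Sum256(tx)
	if _, ok := c.seen[h]; ok {
		return false
	}
	if c.order.Len() >= c.maxSize {
		oldest := c.order.Remove(c.order.Front()).([sha256.Size]byte)
		delete(c.seen, oldest)
	}
	c.order.PushBack(h)
	c.seen[h] = struct{}{}
	return true
}

func main() {
	cache := newTxCache(10000)
	fmt.Println(cache.Push([]byte("tx1"))) // true: first time
	fmt.Println(cache.Push([]byte("tx1"))) // false: replay is rejected
}
```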
12 docs/spec/reactors/pex/reactor.md Normal file
@@ -0,0 +1,12 @@

# PEX Reactor

## Channels

Defines only `SendQueueCapacity`. [#1503](https://github.com/tendermint/tendermint/issues/1503)

Implements rate-limiting by enforcing a minimal time between two consecutive
`pexRequestMessage` requests. If the peer sends us addresses we did not ask for,
it is stopped.

Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
in stopping the peer.
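A sketch of the rate-limiting rule described above (illustrative, not the reactor's actual implementation): a `pexRequestMessage` is only honoured if enough time has passed since the peer's previous request.

```go
package main

import (
	"fmt"
	"time"
)

// requestLimiter enforces a minimal interval between consecutive address
// requests from the same peer.
type requestLimiter struct {
	minInterval time.Duration
	lastRequest map[string]time.Time // peer ID -> time of last request
}

func newRequestLimiter(minInterval time.Duration) *requestLimiter {
	return &requestLimiter{minInterval: minInterval, lastRequest: make(map[string]time.Time)}
}

// Allow reports whether a pexRequestMessage from peerID should be served now.
func (l *requestLimiter) Allow(peerID string, now time.Time) bool {
	if last, ok := l.lastRequest[peerID]; ok && now.Sub(last) < l.minInterval {
		return false // too soon: ignore the request or treat it as misbehaviour
	}
	l.lastRequest[peerID] = now
	return true
}

func main() {
	l := newRequestLimiter(30 * time.Second)
	now := time.Now()
	fmt.Println(l.Allow("peer1", now))                    // true
	fmt.Println(l.Allow("peer1", now.Add(5*time.Second))) // false: below the minimal interval
}
```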
@@ -122,9 +122,6 @@ like the file below, however, double check by inspecting the
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false

# Authenticated encryption
auth_enc = true

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
@@ -1,190 +1,4 @@
RPC
===

Coming soon: RPC docs powered by `slate <https://github.com/lord/slate>`__. Until then, read on.

Tendermint supports the following RPC protocols:

- URI over HTTP
- JSONRPC over HTTP
- JSONRPC over websockets

Tendermint RPC is built using `our own RPC
library <https://github.com/tendermint/tendermint/tree/master/rpc/lib>`__.
Documentation and tests for that library can be found in the
``tendermint/rpc/lib`` directory.

Configuration
~~~~~~~~~~~~~

Set the ``laddr`` config parameter under the ``[rpc]`` table in the
$TMHOME/config/config.toml file or the ``--rpc.laddr`` command-line flag to the
desired protocol://host:port setting. Default: ``tcp://0.0.0.0:46657``.

Arguments
~~~~~~~~~

Arguments which expect strings or byte arrays may be passed as quoted
strings, like ``"abc"``, or as ``0x``-prefixed strings, like
``0x616263``.
URI/HTTP
~~~~~~~~

Example request:

.. code:: bash

    curl -s 'http://localhost:46657/broadcast_tx_sync?tx="abc"' | jq .

Response:

.. code:: json

    {
        "error": "",
        "result": {
            "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
            "log": "",
            "data": "",
            "code": 0
        },
        "id": "",
        "jsonrpc": "2.0"
    }

The first entry in the result-array (``96``) is the method this response
correlates with. ``96`` refers to "ResultTypeBroadcastTx", see
`responses.go <https://github.com/tendermint/tendermint/blob/master/rpc/core/types/responses.go>`__
for a complete overview.
JSONRPC/HTTP
~~~~~~~~~~~~

JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g.
``http://localhost:46657/``).

Example request:

.. code:: json

    {
        "method": "broadcast_tx_sync",
        "jsonrpc": "2.0",
        "params": [ "abc" ],
        "id": "dontcare"
    }

JSONRPC/websockets
~~~~~~~~~~~~~~~~~~

JSONRPC requests can be made via websocket. The websocket endpoint is at
``/websocket``, e.g. ``http://localhost:46657/websocket``. Asynchronous
RPC functions like event ``subscribe`` and ``unsubscribe`` are only
available via websockets.

Endpoints
~~~~~~~~~

An HTTP GET request to the root RPC endpoint (e.g.
``http://localhost:46657``) shows a list of available endpoints.
::

    Available endpoints:
    http://localhost:46657/abci_info
    http://localhost:46657/dump_consensus_state
    http://localhost:46657/genesis
    http://localhost:46657/net_info
    http://localhost:46657/num_unconfirmed_txs
    http://localhost:46657/health
    http://localhost:46657/status
    http://localhost:46657/unconfirmed_txs
    http://localhost:46657/unsafe_flush_mempool
    http://localhost:46657/unsafe_stop_cpu_profiler
    http://localhost:46657/validators

    Endpoints that require arguments:
    http://localhost:46657/abci_query?path=_&data=_&prove=_
    http://localhost:46657/block?height=_
    http://localhost:46657/blockchain?minHeight=_&maxHeight=_
    http://localhost:46657/broadcast_tx_async?tx=_
    http://localhost:46657/broadcast_tx_commit?tx=_
    http://localhost:46657/broadcast_tx_sync?tx=_
    http://localhost:46657/commit?height=_
    http://localhost:46657/dial_seeds?seeds=_
    http://localhost:46657/dial_peers?peers=_&persistent=_
    http://localhost:46657/subscribe?event=_
    http://localhost:46657/tx?hash=_&prove=_
    http://localhost:46657/unsafe_start_cpu_profiler?filename=_
    http://localhost:46657/unsafe_write_heap_profile?filename=_
    http://localhost:46657/unsubscribe?event=_
tx
~~

Returns a transaction matching the given transaction hash.

**Parameters**

1. hash - the transaction hash
2. prove - include a proof of the transaction's inclusion in the block in
   the result (optional, default: false)

**Returns**

- ``proof``: the ``types.TxProof`` object
- ``tx``: ``[]byte`` - the transaction
- ``tx_result``: the ``abci.Result`` object
- ``index``: ``int`` - index of the transaction
- ``height``: ``int`` - height of the block where this transaction was in

**Example**
.. code:: bash

    curl -s 'http://localhost:46657/broadcast_tx_commit?tx="abc"' | jq .
    # {
    #   "error": "",
    #   "result": {
    #     "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
    #     "log": "",
    #     "data": "",
    #     "code": 0
    #   },
    #   "id": "",
    #   "jsonrpc": "2.0"
    # }

    curl -s 'http://localhost:46657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF' | jq .
    # {
    #   "error": "",
    #   "result": {
    #     "proof": {
    #       "Proof": {
    #         "aunts": []
    #       },
    #       "Data": "YWJjZA==",
    #       "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
    #       "Total": 1,
    #       "Index": 0
    #     },
    #     "tx": "YWJjZA==",
    #     "tx_result": {
    #       "log": "",
    #       "data": "",
    #       "code": 0
    #     },
    #     "index": 0,
    #     "height": 52
    #   },
    #   "id": "",
    #   "jsonrpc": "2.0"
    # }
More Examples
~~~~~~~~~~~~~

See the various bash tests using curl in ``test/``, and examples using
the ``Go`` API in ``rpc/client/``.
The RPC documentation is hosted `here <https://tendermint.github.io/slate>`__ and is generated by the CI from our `Slate repo <https://github.com/tendermint/slate>`__. To update the documentation, edit the relevant ``godoc`` comments in the `rpc/core directory <https://github.com/tendermint/tendermint/tree/develop/rpc/core>`__.
@@ -65,9 +65,7 @@ are connected to at least one validator.

Config
------

Authenticated encryption is enabled by default. If you wish to use another
authentication scheme or your peers are connected via VPN, you can turn it off
by setting ``auth_enc`` to ``false`` in the config file.
Authenticated encryption is enabled by default.

Additional Reading
------------------
28 docs/subscribing-to-events-via-websocket.rst Normal file
@@ -0,0 +1,28 @@

Subscribing to events via Websocket
===================================

Tendermint emits different events, to which you can subscribe via `Websocket
<https://en.wikipedia.org/wiki/WebSocket>`__. This can be useful for
third-party applications (for analysis) or for inspecting state.

`List of events <https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants>`__

You can subscribe to any of the events above by calling the ``subscribe`` RPC method via Websocket.

::

    {
        "jsonrpc": "2.0",
        "method": "subscribe",
        "id": "0",
        "params": {
            "query": "tm.event='NewBlock'"
        }
    }

Check out the `API docs <https://tendermint.github.io/slate/#subscribe>`__ for more
information on query syntax and other options.

You can also use tags, provided you have included them in the DeliverTx response, to
query transaction results. See `Indexing transactions
<./indexing-transactions.html>`__ for details.
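For a programmatic client, the same subscription can be made from Go. The sketch below uses the third-party ``github.com/gorilla/websocket`` package (an assumption of this example, not a Tendermint dependency) to send the JSONRPC request shown above and print incoming events.

.. code:: go

    package main

    import (
        "fmt"
        "log"

        "github.com/gorilla/websocket" // assumed dependency for this example
    )

    func main() {
        // Connect to the websocket endpoint of a local node.
        conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:46657/websocket", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // Send the same subscribe request as in the JSON example above.
        req := map[string]interface{}{
            "jsonrpc": "2.0",
            "method":  "subscribe",
            "id":      "0",
            "params":  map[string]string{"query": "tm.event='NewBlock'"},
        }
        if err := conn.WriteJSON(req); err != nil {
            log.Fatal(err)
        }

        // Print every message (subscription confirmation, then NewBlock events).
        for {
            _, msg, err := conn.ReadMessage()
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(string(msg))
        }
    }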
@@ -22,7 +22,7 @@ func TestExample(t *testing.T) {
|
||||
ch := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]interface{}{"abci.account.name": "John"}))
|
||||
err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]string{"abci.account.name": "John"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Tombstone", ch)
|
||||
}
|
||||
|
@@ -38,18 +38,6 @@ var (
|
||||
ErrAlreadySubscribed = errors.New("already subscribed")
|
||||
)
|
||||
|
||||
// TagMap is used to associate tags to a message.
|
||||
// They can be queried by subscribers to choose messages they will received.
|
||||
type TagMap interface {
|
||||
// Get returns the value for a key, or nil if no value is present.
|
||||
// The ok result indicates whether value was found in the tags.
|
||||
Get(key string) (value interface{}, ok bool)
|
||||
// Len returns the number of tags.
|
||||
Len() int
|
||||
}
|
||||
|
||||
type tagMap map[string]interface{}
|
||||
|
||||
type cmd struct {
|
||||
op operation
|
||||
query Query
|
||||
@@ -80,14 +68,28 @@ type Server struct {
|
||||
// Option sets a parameter for the server.
|
||||
type Option func(*Server)
|
||||
|
||||
// TagMap is used to associate tags to a message.
|
||||
// They can be queried by subscribers to choose messages they will receive.
|
||||
type TagMap interface {
|
||||
// Get returns the value for a key, or nil if no value is present.
|
||||
// The ok result indicates whether value was found in the tags.
|
||||
Get(key string) (value string, ok bool)
|
||||
// Len returns the number of tags.
|
||||
Len() int
|
||||
}
|
||||
|
||||
type tagMap map[string]string
|
||||
|
||||
var _ TagMap = (*tagMap)(nil)
|
||||
|
||||
// NewTagMap constructs a new immutable tag set from a map.
|
||||
func NewTagMap(data map[string]interface{}) TagMap {
|
||||
func NewTagMap(data map[string]string) TagMap {
|
||||
return tagMap(data)
|
||||
}
|
||||
|
||||
// Get returns the value for a key, or nil if no value is present.
|
||||
// The ok result indicates whether value was found in the tags.
|
||||
func (ts tagMap) Get(key string) (value interface{}, ok bool) {
|
||||
func (ts tagMap) Get(key string) (value string, ok bool) {
|
||||
value, ok = ts[key]
|
||||
return
|
||||
}
|
||||
@@ -213,7 +215,7 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
|
||||
// Publish publishes the given message. An error will be returned to the caller
|
||||
// if the context is canceled.
|
||||
func (s *Server) Publish(ctx context.Context, msg interface{}) error {
|
||||
return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]interface{})))
|
||||
return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]string)))
|
||||
}
|
||||
|
||||
// PublishWithTags publishes the given message with the set of tags. The set is
|
||||
|
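The breaking change above makes tag values plain strings instead of `interface{}`. A self-contained sketch of the new shape of the API (the `TagMap` interface and `tagMap` type are copied from this diff; import paths are omitted to avoid guessing them):

```go
package main

import "fmt"

// TagMap and tagMap mirror the definitions added in this diff:
// values are plain strings now, not interface{}.
type TagMap interface {
	Get(key string) (value string, ok bool)
	Len() int
}

type tagMap map[string]string

func NewTagMap(data map[string]string) TagMap { return tagMap(data) }

func (ts tagMap) Get(key string) (value string, ok bool) { value, ok = ts[key]; return }
func (ts tagMap) Len() int                               { return len(ts) }

func main() {
	// Callers of PublishWithTags now stringify everything up front,
	// e.g. numbers become "8" rather than int(8).
	tags := NewTagMap(map[string]string{"tm.events.type": "NewBlock", "tx.gas": "8"})
	if v, ok := tags.Get("tx.gas"); ok {
		fmt.Println("tx.gas =", v) // "8"
	}
	fmt.Println("number of tags:", tags.Len())
}
```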
@@ -49,14 +49,14 @@ func TestDifferentClients(t *testing.T) {
|
||||
ch1 := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"}))
|
||||
err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Iceman", ch1)
|
||||
|
||||
ch2 := make(chan interface{}, 1)
|
||||
err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}))
|
||||
err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Ultimo", ch1)
|
||||
assertReceive(t, "Ultimo", ch2)
|
||||
@@ -64,7 +64,7 @@ func TestDifferentClients(t *testing.T) {
|
||||
ch3 := make(chan interface{}, 1)
|
||||
err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewRoundStep"}))
|
||||
err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewRoundStep"}))
|
||||
require.NoError(t, err)
|
||||
assert.Zero(t, len(ch3))
|
||||
}
|
||||
@@ -81,7 +81,7 @@ func TestClientSubscribesTwice(t *testing.T) {
|
||||
ch1 := make(chan interface{}, 1)
|
||||
err := s.Subscribe(ctx, clientID, q, ch1)
|
||||
require.NoError(t, err)
|
||||
err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"}))
|
||||
err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Goblin Queen", ch1)
|
||||
|
||||
@@ -89,7 +89,7 @@ func TestClientSubscribesTwice(t *testing.T) {
|
||||
err = s.Subscribe(ctx, clientID, q, ch2)
|
||||
require.Error(t, err)
|
||||
|
||||
err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]interface{}{"tm.events.type": "NewBlock"}))
|
||||
err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
|
||||
require.NoError(t, err)
|
||||
assertReceive(t, "Spider-Man", ch1)
|
||||
}
|
||||
@@ -209,7 +209,7 @@ func benchmarkNClients(n int, b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": i}))
|
||||
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)}))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +232,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]interface{}{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": 1}))
|
||||
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"}))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -11,8 +11,8 @@ import (
|
||||
|
||||
func TestEmptyQueryMatchesAnything(t *testing.T) {
|
||||
q := query.Empty{}
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Asher": "Roth"})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]interface{}{"Route": 66, "Billy": "Blue"})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Asher": "Roth"})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66"})))
|
||||
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66", "Billy": "Blue"})))
|
||||
}
|
||||
|
@@ -77,6 +77,13 @@ const (
|
||||
OpContains
|
||||
)
|
||||
|
||||
const (
|
||||
// DateLayout defines a layout for all dates (`DATE date`)
|
||||
DateLayout = "2006-01-02"
|
||||
// TimeLayout defines a layout for all times (`TIME time`)
|
||||
TimeLayout = time.RFC3339
|
||||
)
|
||||
|
||||
// Conditions returns a list of conditions.
|
||||
func (q *Query) Conditions() []Condition {
|
||||
conditions := make([]Condition, 0)
|
||||
@@ -112,7 +119,7 @@ func (q *Query) Conditions() []Condition {
|
||||
conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes})
|
||||
case rulenumber:
|
||||
number := buffer[begin:end]
|
||||
if strings.Contains(number, ".") { // if it looks like a floating-point number
|
||||
if strings.ContainsAny(number, ".") { // if it looks like a floating-point number
|
||||
value, err := strconv.ParseFloat(number, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
|
||||
@@ -126,7 +133,7 @@ func (q *Query) Conditions() []Condition {
|
||||
conditions = append(conditions, Condition{tag, op, value})
|
||||
}
|
||||
case ruletime:
|
||||
value, err := time.Parse(time.RFC3339, buffer[begin:end])
|
||||
value, err := time.Parse(TimeLayout, buffer[begin:end])
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
|
||||
}
|
||||
@@ -188,7 +195,7 @@ func (q *Query) Matches(tags pubsub.TagMap) bool {
|
||||
}
|
||||
case rulenumber:
|
||||
number := buffer[begin:end]
|
||||
if strings.Contains(number, ".") { // if it looks like a floating-point number
|
||||
if strings.ContainsAny(number, ".") { // if it looks like a floating-point number
|
||||
value, err := strconv.ParseFloat(number, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
|
||||
@@ -206,7 +213,7 @@ func (q *Query) Matches(tags pubsub.TagMap) bool {
|
||||
}
|
||||
}
|
||||
case ruletime:
|
||||
value, err := time.Parse(time.RFC3339, buffer[begin:end])
|
||||
value, err := time.Parse(TimeLayout, buffer[begin:end])
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
|
||||
}
|
||||
@@ -242,9 +249,18 @@ func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) b
|
||||
switch operand.Kind() {
|
||||
case reflect.Struct: // time
|
||||
operandAsTime := operand.Interface().(time.Time)
|
||||
v, ok := value.(time.Time)
|
||||
if !ok { // if value from tags is not time.Time
|
||||
return false
|
||||
// try our best to convert value from tags to time.Time
|
||||
var (
|
||||
v time.Time
|
||||
err error
|
||||
)
|
||||
if strings.ContainsAny(value, "T") {
|
||||
v, err = time.Parse(TimeLayout, value)
|
||||
} else {
|
||||
v, err = time.Parse(DateLayout, value)
|
||||
}
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err))
|
||||
}
|
||||
switch op {
|
||||
case OpLessEqual:
|
||||
@@ -262,23 +278,9 @@ func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) b
|
||||
operandFloat64 := operand.Interface().(float64)
|
||||
var v float64
|
||||
// try our best to convert value from tags to float64
|
||||
switch vt := value.(type) {
|
||||
case float64:
|
||||
v = vt
|
||||
case float32:
|
||||
v = float64(vt)
|
||||
case int:
|
||||
v = float64(vt)
|
||||
case int8:
|
||||
v = float64(vt)
|
||||
case int16:
|
||||
v = float64(vt)
|
||||
case int32:
|
||||
v = float64(vt)
|
||||
case int64:
|
||||
v = float64(vt)
|
||||
default: // fail for all other types
|
||||
panic(fmt.Sprintf("Incomparable types: %T (%v) vs float64 (%v)", value, value, operandFloat64))
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err))
|
||||
}
|
||||
switch op {
|
||||
case OpLessEqual:
|
||||
@@ -295,24 +297,20 @@ func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) b
|
||||
case reflect.Int64:
|
||||
operandInt := operand.Interface().(int64)
|
||||
var v int64
|
||||
// try our best to convert value from tags to int64
|
||||
switch vt := value.(type) {
|
||||
case int64:
|
||||
v = vt
|
||||
case int8:
|
||||
v = int64(vt)
|
||||
case int16:
|
||||
v = int64(vt)
|
||||
case int32:
|
||||
v = int64(vt)
|
||||
case int:
|
||||
v = int64(vt)
|
||||
case float64:
|
||||
v = int64(vt)
|
||||
case float32:
|
||||
v = int64(vt)
|
||||
default: // fail for all other types
|
||||
panic(fmt.Sprintf("Incomparable types: %T (%v) vs int64 (%v)", value, value, operandInt))
|
||||
// if value looks like float, we try to parse it as float
|
||||
if strings.ContainsAny(value, ".") {
|
||||
v1, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err))
|
||||
}
|
||||
v = int64(v1)
|
||||
} else {
|
||||
var err error
|
||||
// try our best to convert value from tags to int64
|
||||
v, err = strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err))
|
||||
}
|
||||
}
|
||||
switch op {
|
||||
case OpLessEqual:
|
||||
@@ -327,15 +325,11 @@ func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) b
|
||||
return v == operandInt
|
||||
}
|
||||
case reflect.String:
|
||||
v, ok := value.(string)
|
||||
if !ok { // if value from tags is not string
|
||||
return false
|
||||
}
|
||||
switch op {
|
||||
case OpEqual:
|
||||
return v == operand.String()
|
||||
return value == operand.String()
|
||||
case OpContains:
|
||||
return strings.Contains(v, operand.String())
|
||||
return strings.Contains(value, operand.String())
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind()))
|
||||
|
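Since tag values are now strings, the query package converts them back to numbers and times at match time, using the new `DateLayout`/`TimeLayout` constants and `strconv`. The sketch below condenses those conversions into one helper; in the real `match()` the target type is dictated by the query operand rather than guessed from the value.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

const (
	DateLayout = "2006-01-02" // layout for `DATE date` operands
	TimeLayout = time.RFC3339 // layout for `TIME time` operands
)

// parseTagValue shows the string-to-value conversions used when matching:
// full RFC3339 times, plain dates, floats, ints, and finally raw strings.
func parseTagValue(value string) interface{} {
	if strings.ContainsAny(value, "T") {
		if t, err := time.Parse(TimeLayout, value); err == nil {
			return t
		}
	}
	if d, err := time.Parse(DateLayout, value); err == nil {
		return d
	}
	if strings.ContainsAny(value, ".") {
		if f, err := strconv.ParseFloat(value, 64); err == nil {
			return f
		}
	}
	if i, err := strconv.ParseInt(value, 10, 64); err == nil {
		return i
	}
	return value
}

func main() {
	fmt.Printf("%T\n", parseTagValue("2018-05-03T14:45:00Z")) // time.Time
	fmt.Printf("%T\n", parseTagValue("2017-01-01"))           // time.Time
	fmt.Printf("%T\n", parseTagValue("3.5"))                  // float64
	fmt.Printf("%T\n", parseTagValue("8"))                    // int64
	fmt.Printf("%T\n", parseTagValue("NewBlock"))             // string
}
```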
@@ -1,6 +1,7 @@
|
||||
package query_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -12,38 +13,37 @@ import (
|
||||
)
|
||||
|
||||
func TestMatches(t *testing.T) {
|
||||
const shortForm = "2006-Jan-02"
|
||||
txDate, err := time.Parse(shortForm, "2017-Jan-01")
|
||||
require.NoError(t, err)
|
||||
txTime, err := time.Parse(time.RFC3339, "2018-05-03T14:45:00Z")
|
||||
require.NoError(t, err)
|
||||
var (
|
||||
txDate = "2017-01-01"
|
||||
txTime = "2018-05-03T14:45:00Z"
|
||||
)
|
||||
|
||||
testCases := []struct {
|
||||
s string
|
||||
tags map[string]interface{}
|
||||
tags map[string]string
|
||||
err bool
|
||||
matches bool
|
||||
}{
|
||||
{"tm.events.type='NewBlock'", map[string]interface{}{"tm.events.type": "NewBlock"}, false, true},
|
||||
{"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true},
|
||||
|
||||
{"tx.gas > 7", map[string]interface{}{"tx.gas": 8}, false, true},
|
||||
{"tx.gas > 7 AND tx.gas < 9", map[string]interface{}{"tx.gas": 8}, false, true},
|
||||
{"body.weight >= 3.5", map[string]interface{}{"body.weight": 3.5}, false, true},
|
||||
{"account.balance < 1000.0", map[string]interface{}{"account.balance": 900}, false, true},
|
||||
{"apples.kg <= 4", map[string]interface{}{"apples.kg": 4.0}, false, true},
|
||||
{"body.weight >= 4.5", map[string]interface{}{"body.weight": float32(4.5)}, false, true},
|
||||
{"oranges.kg < 4 AND watermellons.kg > 10", map[string]interface{}{"oranges.kg": 3, "watermellons.kg": 12}, false, true},
|
||||
{"peaches.kg < 4", map[string]interface{}{"peaches.kg": 5}, false, false},
|
||||
{"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true},
|
||||
{"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true},
|
||||
{"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true},
|
||||
{"account.balance < 1000.0", map[string]string{"account.balance": "900"}, false, true},
|
||||
{"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true},
|
||||
{"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true},
|
||||
{"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true},
|
||||
{"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false},
|
||||
|
||||
{"tx.date > DATE 2017-01-01", map[string]interface{}{"tx.date": time.Now()}, false, true},
|
||||
{"tx.date = DATE 2017-01-01", map[string]interface{}{"tx.date": txDate}, false, true},
|
||||
{"tx.date = DATE 2018-01-01", map[string]interface{}{"tx.date": txDate}, false, false},
|
||||
{"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true},
|
||||
{"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true},
|
||||
{"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false},
|
||||
|
||||
{"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": time.Now()}, false, true},
|
||||
{"tx.time = TIME 2013-05-03T14:45:00Z", map[string]interface{}{"tx.time": txTime}, false, false},
|
||||
{"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true},
|
||||
{"tx.time = TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": txTime}, false, false},
|
||||
|
||||
{"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Igor,Ivan"}, false, true},
|
||||
{"abci.owner.name CONTAINS 'Igor'", map[string]interface{}{"abci.owner.name": "Pavel,Ivan"}, false, false},
|
||||
{"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true},
|
||||
{"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
@@ -159,7 +159,7 @@ func NewNode(config *cfg.Config,
|
||||
// and sync tendermint and the app by performing a handshake
|
||||
// and replaying any necessary blocks
|
||||
consensusLogger := logger.With("module", "consensus")
|
||||
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
||||
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
|
||||
handshaker.SetLogger(consensusLogger)
|
||||
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
@@ -269,9 +269,6 @@ func NewNode(config *cfg.Config,
|
||||
// but it would still be nice to have a clear list of the current "PersistentPeers"
|
||||
// somewhere that we can return with net_info.
|
||||
//
|
||||
// Let's assume we always have IDs ... and we just dont authenticate them
|
||||
// if auth_enc=false.
|
||||
//
|
||||
// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
|
||||
// Note we currently use the addrBook regardless at least for AddOurAddress
|
||||
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
|
||||
|
25 p2p/peer.go
@@ -6,7 +6,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/go-crypto"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
@@ -116,8 +116,6 @@ func newPeer(pc peerConn, nodeInfo NodeInfo,
|
||||
|
||||
// PeerConfig is a Peer configuration.
|
||||
type PeerConfig struct {
|
||||
AuthEnc bool `mapstructure:"auth_enc"` // authenticated encryption
|
||||
|
||||
// times are in seconds
|
||||
HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
|
||||
DialTimeout time.Duration `mapstructure:"dial_timeout"`
|
||||
@@ -132,7 +130,6 @@ type PeerConfig struct {
|
||||
// DefaultPeerConfig returns the default config.
|
||||
func DefaultPeerConfig() *PeerConfig {
|
||||
return &PeerConfig{
|
||||
AuthEnc: true,
|
||||
HandshakeTimeout: 20, // * time.Second,
|
||||
DialTimeout: 3, // * time.Second,
|
||||
MConfig: tmconn.DefaultMConnConfig(),
|
||||
@@ -159,7 +156,7 @@ func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool,
|
||||
}
|
||||
|
||||
// ensure dialed ID matches connection ID
|
||||
if config.AuthEnc && addr.ID != pc.ID() {
|
||||
if addr.ID != pc.ID() {
|
||||
if err2 := conn.Close(); err2 != nil {
|
||||
return pc, cmn.ErrorWrap(err, err2.Error())
|
||||
}
|
||||
@@ -187,17 +184,15 @@ func newPeerConn(rawConn net.Conn,
|
||||
conn = FuzzConnAfterFromConfig(conn, 10*time.Second, config.FuzzConfig)
|
||||
}
|
||||
|
||||
if config.AuthEnc {
|
||||
// Set deadline for secret handshake
|
||||
if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil {
|
||||
return pc, cmn.ErrorWrap(err, "Error setting deadline while encrypting connection")
|
||||
}
|
||||
// Set deadline for secret handshake
|
||||
if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil {
|
||||
return pc, cmn.ErrorWrap(err, "Error setting deadline while encrypting connection")
|
||||
}
|
||||
|
||||
// Encrypt connection
|
||||
conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey)
|
||||
if err != nil {
|
||||
return pc, cmn.ErrorWrap(err, "Error creating peer")
|
||||
}
|
||||
// Encrypt connection
|
||||
conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey)
|
||||
if err != nil {
|
||||
return pc, cmn.ErrorWrap(err, "Error creating peer")
|
||||
}
|
||||
|
||||
// Only the information we already have
|
||||
|
@@ -47,10 +47,6 @@ func (ps *PeerSet) Add(peer Peer) error {
|
||||
return ErrSwitchDuplicatePeerID{peer.ID()}
|
||||
}
|
||||
|
||||
if ps.hasIP(peer.RemoteIP()) {
|
||||
return ErrSwitchDuplicatePeerIP{peer.RemoteIP()}
|
||||
}
|
||||
|
||||
index := len(ps.list)
|
||||
// Appending is safe even with other goroutines
|
||||
// iterating over the ps.list slice.
|
||||
|
@@ -143,20 +143,6 @@ func TestPeerSetAddDuplicate(t *testing.T) {
|
||||
assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
|
||||
}
|
||||
|
||||
func TestPeerSetAddDuplicateIP(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
peerSet := NewPeerSet()
|
||||
|
||||
if err := peerSet.Add(randPeer(net.IP{172, 0, 0, 1})); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add peer with same IP.
|
||||
err := peerSet.Add(randPeer(net.IP{172, 0, 0, 1}))
|
||||
assert.Equal(t, ErrSwitchDuplicatePeerIP{IP: net.IP{172, 0, 0, 1}}, err)
|
||||
}
|
||||
|
||||
func TestPeerSetGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
@@ -41,32 +41,10 @@ func TestPeerBasic(t *testing.T) {
|
||||
assert.Equal(rp.ID(), p.ID())
|
||||
}
|
||||
|
||||
func TestPeerWithoutAuthEnc(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
config := DefaultPeerConfig()
|
||||
config.AuthEnc = false
|
||||
|
||||
// simulate remote peer
|
||||
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
|
||||
rp.Start()
|
||||
defer rp.Stop()
|
||||
|
||||
p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
|
||||
require.Nil(err)
|
||||
|
||||
err = p.Start()
|
||||
require.Nil(err)
|
||||
defer p.Stop()
|
||||
|
||||
assert.True(p.IsRunning())
|
||||
}
|
||||
|
||||
func TestPeerSend(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
config := DefaultPeerConfig()
|
||||
config.AuthEnc = false
|
||||
|
||||
// simulate remote peer
|
||||
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
|
||||
|
@@ -7,7 +7,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -281,6 +281,7 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
|
||||
// RequestAddrs asks peer for more addresses if we do not already
|
||||
// have a request out for this peer.
|
||||
func (r *PEXReactor) RequestAddrs(p Peer) {
|
||||
r.Logger.Debug("Request addrs", "from", p)
|
||||
id := string(p.ID())
|
||||
if r.requestsSent.Has(id) {
|
||||
return
|
||||
|
@@ -27,6 +27,7 @@ var (
|
||||
func init() {
|
||||
config = cfg.DefaultP2PConfig()
|
||||
config.PexReactor = true
|
||||
config.AllowDuplicateIP = true
|
||||
}
|
||||
|
||||
func TestPEXReactorBasic(t *testing.T) {
|
||||
@@ -59,6 +60,16 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
|
||||
assert.Equal(t, size+1, book.Size())
|
||||
}
|
||||
|
||||
// --- FAIL: TestPEXReactorRunning (11.10s)
|
||||
// pex_reactor_test.go:411: expected all switches to be connected to at
|
||||
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
|
||||
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
|
||||
//
|
||||
// EXPLANATION: peers are getting rejected because in switch#addPeer we check
|
||||
// if any peer (who we already connected to) has the same IP. Even though local
|
||||
// peers have different IP addresses, they all have the same underlying remote
|
||||
// IP: 127.0.0.1.
|
||||
//
|
||||
func TestPEXReactorRunning(t *testing.T) {
|
||||
N := 3
|
||||
switches := make([]*p2p.Switch, N)
|
||||
|
@@ -95,7 +95,6 @@ func NewSwitch(config *cfg.P2PConfig) *Switch {
|
||||
sw.peerConfig.MConfig.SendRate = config.SendRate
|
||||
sw.peerConfig.MConfig.RecvRate = config.RecvRate
|
||||
sw.peerConfig.MConfig.MaxPacketMsgPayloadSize = config.MaxPacketMsgPayloadSize
|
||||
sw.peerConfig.AuthEnc = config.AuthEnc
|
||||
|
||||
sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
|
||||
return sw
|
||||
@@ -534,10 +533,6 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE: if AuthEnc==false, we don't have a peerID until after the handshake.
|
||||
// If AuthEnc==true then we already know the ID and could do the checks first before the handshake,
|
||||
// but it's simple to just deal with both cases the same after the handshake.
|
||||
|
||||
// Exchange NodeInfo on the conn
|
||||
peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.peerConfig.HandshakeTimeout*time.Second))
|
||||
if err != nil {
|
||||
@@ -547,13 +542,14 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
peerID := peerNodeInfo.ID
|
||||
|
||||
// ensure connection key matches self reported key
|
||||
if pc.config.AuthEnc {
|
||||
connID := pc.ID()
|
||||
connID := pc.ID()
|
||||
|
||||
if peerID != connID {
|
||||
return fmt.Errorf("nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
|
||||
peerID, connID)
|
||||
}
|
||||
if peerID != connID {
|
||||
return fmt.Errorf(
|
||||
"nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
|
||||
peerID,
|
||||
connID,
|
||||
)
|
||||
}
|
||||
|
||||
// Validate the peers nodeInfo
|
||||
@@ -568,7 +564,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
// and add to our addresses to avoid dialing again
|
||||
sw.addrBook.RemoveAddress(addr)
|
||||
sw.addrBook.AddOurAddress(addr)
|
||||
return ErrSwitchConnectToSelf{}
|
||||
return ErrSwitchConnectToSelf{addr}
|
||||
}
|
||||
|
||||
// Avoid duplicate
|
||||
@@ -577,8 +573,9 @@ func (sw *Switch) addPeer(pc peerConn) error {
|
||||
}
|
||||
|
||||
// Check for duplicate connection or peer info IP.
|
||||
if sw.peers.HasIP(pc.RemoteIP()) ||
|
||||
sw.peers.HasIP(peerNodeInfo.NetAddress().IP) {
|
||||
if !sw.config.AllowDuplicateIP &&
|
||||
(sw.peers.HasIP(pc.RemoteIP()) ||
|
||||
sw.peers.HasIP(peerNodeInfo.NetAddress().IP)) {
|
||||
return ErrSwitchDuplicatePeerIP{pc.RemoteIP()}
|
||||
}
|
||||
|
||||
|
@@ -25,6 +25,7 @@ var (
|
||||
func init() {
|
||||
config = cfg.DefaultP2PConfig()
|
||||
config.PexReactor = true
|
||||
config.AllowDuplicateIP = true
|
||||
}
|
||||
|
||||
type PeerMessage struct {
|
||||
@@ -180,7 +181,7 @@ func TestConnAddrFilter(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSwitchFiltersOutItself(t *testing.T) {
|
||||
s1 := MakeSwitch(config, 1, "127.0.0.2", "123.123.123", initSwitchFunc)
|
||||
s1 := MakeSwitch(config, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
|
||||
// addr := s1.NodeInfo().NetAddress()
|
||||
|
||||
// // add ourselves like we do in node.go#427
|
||||
@@ -193,7 +194,7 @@ func TestSwitchFiltersOutItself(t *testing.T) {
|
||||
// addr should be rejected in addPeer based on the same ID
|
||||
err := s1.DialPeerWithAddress(rp.Addr(), false)
|
||||
if assert.Error(t, err) {
|
||||
assert.EqualValues(t, ErrSwitchConnectToSelf{}, err)
|
||||
assert.Equal(t, ErrSwitchConnectToSelf{rp.Addr()}.Error(), err.Error())
|
||||
}
|
||||
|
||||
assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
|
||||
@@ -322,7 +323,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
|
||||
Config: DefaultPeerConfig(),
|
||||
// Use different interface to prevent duplicate IP filter, this will break
|
||||
// beyond two peers.
|
||||
listenAddr: "127.0.0.2:0",
|
||||
listenAddr: "127.0.0.1:0",
|
||||
}
|
||||
rp.Start()
|
||||
defer rp.Stop()
|
||||
|
@@ -3,7 +3,6 @@ package p2p
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -132,8 +131,6 @@ func StartSwitches(switches []*Switch) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var listenAddrSuffix uint32 = 1
|
||||
|
||||
func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
|
||||
// new switch, add reactors
|
||||
// TODO: let the config be passed in?
|
||||
@@ -148,7 +145,7 @@ func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
|
||||
Moniker: cmn.Fmt("switch%d", i),
|
||||
Network: network,
|
||||
Version: version,
|
||||
ListenAddr: fmt.Sprintf("127.0.0.%d:%d", atomic.AddUint32(&listenAddrSuffix, 1), cmn.RandIntn(64512)+1023),
|
||||
ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023),
|
||||
}
|
||||
for ch := range sw.reactorsByCh {
|
||||
ni.Channels = append(ni.Channels, ch)
|
||||
|
@@ -3,17 +3,16 @@
|
||||
## Generate markdown for [Slate](https://github.com/tendermint/slate)
|
||||
|
||||
We are using [Slate](https://github.com/tendermint/slate) to power our RPC
|
||||
documentation. If you are changing a comment, make sure to copy the resulting
|
||||
changes to the slate repo and make a PR
|
||||
[there](https://github.com/tendermint/slate) as well. For generating markdown
|
||||
use:
|
||||
documentation. For generating markdown use:
|
||||
|
||||
```shell
|
||||
go get github.com/melekes/godoc2md
|
||||
go get github.com/davecheney/godoc2md
|
||||
|
||||
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$'
|
||||
```
|
||||
|
||||
For more information see the [CI script for building the Slate docs](/scripts/slate.sh)
|
||||
|
||||
## Pagination
|
||||
|
||||
Requests that return multiple items will be paginated to 30 items by default.
|
||||
|
@@ -7,7 +7,7 @@ Tendermint supports the following RPC protocols:
|
||||
* JSONRPC over HTTP
|
||||
* JSONRPC over websockets
|
||||
|
||||
Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib). Documentation and tests for that library could be found at `tendermint/rpc/lib` directory.
|
||||
Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib) which contains its own set of documentation and tests.
|
||||
|
||||
## Configuration
|
||||
|
||||
|
77 scripts/slate.sh Normal file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
if [ "$CIRCLE_BRANCH" == "" ]; then
|
||||
echo "this script is meant to be run on CircleCI, exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# check for changes in the `rpc/core` directory
|
||||
did_rpc_change=$(git diff --name-status $CIRCLE_BRANCH origin/master | grep rpc/core)
|
||||
|
||||
if [ "$did_rpc_change" == "" ]; then
|
||||
echo "no changes detected in rpc/core, exiting"
|
||||
exit 0
|
||||
else
|
||||
echo "changes detected in rpc/core, continuing"
|
||||
fi
|
||||
|
||||
# only run this script on changes to rpc/core committed to develop
|
||||
if [ "$CIRCLE_BRANCH" != "master" ]; then
|
||||
echo "the branch being built isn't master, exiting"
|
||||
exit 0
|
||||
else
|
||||
echo "on master, building the RPC docs"
|
||||
fi
|
||||
|
||||
# godoc2md used to convert the go documentation from
|
||||
# `rpc/core` into a markdown file consumed by Slate
|
||||
go get github.com/davecheney/godoc2md
|
||||
|
||||
# slate works via forks, and we'll be committing to
|
||||
# master branch, which will trigger our fork to run
|
||||
# the `./deploy.sh` and publish via the `gh-pages` branch
|
||||
slate_repo=github.com/tendermint/slate
|
||||
slate_path="$GOPATH"/src/"$slate_repo"
|
||||
|
||||
if [ ! -d "$slate_path" ]; then
|
||||
git clone https://"$slate_repo".git $slate_path
|
||||
fi
|
||||
|
||||
# the main file we need to update if rpc/core changed
|
||||
destination="$slate_path"/source/index.html.md
|
||||
|
||||
# we remove it then re-create it with the latest changes
|
||||
rm $destination
|
||||
|
||||
header="---
|
||||
title: RPC Reference
|
||||
|
||||
language_tabs:
|
||||
- shell
|
||||
- go
|
||||
|
||||
toc_footers:
|
||||
- <a href='https://tendermint.com/'>Tendermint</a>
|
||||
- <a href='https://github.com/lord/slate'>Documentation Powered by Slate</a>
|
||||
|
||||
search: true
|
||||
---"
|
||||
|
||||
# write header to the main slate file
|
||||
echo "$header" > "$destination"
|
||||
|
||||
# generate a markdown from the godoc comments, using a template
|
||||
rpc_docs=$(godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$')
|
||||
|
||||
# append core RPC docs
|
||||
echo "$rpc_docs" >> "$destination"
|
||||
|
||||
# commit the changes
|
||||
cd $slate_path
|
||||
|
||||
git config --global user.email "github@tendermint.com"
|
||||
git config --global user.name "tenderbot"
|
||||
|
||||
git commit -a -m "Update tendermint RPC docs via CircleCI"
|
||||
git push -q https://${GITHUB_ACCESS_TOKEN}@github.com/tendermint/slate.git master
|
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
fail "github.com/ebuchman/fail-test"
|
||||
abci "github.com/tendermint/abci/types"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
@@ -74,7 +73,7 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block
|
||||
return s, ErrInvalidBlock(err)
|
||||
}
|
||||
|
||||
abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block)
|
||||
abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, s.LastValidators, blockExec.db)
|
||||
if err != nil {
|
||||
return s, ErrProxyAppConn(err)
|
||||
}
|
||||
@@ -160,7 +159,8 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) {
|
||||
|
||||
// Executes block's transactions on proxyAppConn.
|
||||
// Returns a list of transaction results and updates to the validator set
|
||||
func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) {
|
||||
func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
|
||||
block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (*ABCIResponses, error) {
|
||||
var validTxs, invalidTxs = 0, 0
|
||||
|
||||
txIndex := 0
|
||||
@@ -186,29 +186,14 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
|
||||
}
|
||||
proxyAppConn.SetResponseCallback(proxyCb)
|
||||
|
||||
// determine which validators did not sign last block
|
||||
absentVals := make([]int32, 0)
|
||||
for valI, vote := range block.LastCommit.Precommits {
|
||||
if vote == nil {
|
||||
absentVals = append(absentVals, int32(valI))
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: determine which validators were byzantine
|
||||
byzantineVals := make([]abci.Evidence, len(block.Evidence.Evidence))
|
||||
for i, ev := range block.Evidence.Evidence {
|
||||
byzantineVals[i] = abci.Evidence{
|
||||
PubKey: ev.Address(), // XXX
|
||||
Height: ev.Height(),
|
||||
}
|
||||
}
|
||||
signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB)
|
||||
|
||||
// Begin block
|
||||
_, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{
|
||||
Hash: block.Hash(),
|
||||
Header: types.TM2PB.Header(block.Header),
|
||||
AbsentValidators: absentVals,
|
||||
ByzantineValidators: byzantineVals,
|
||||
Validators: signVals,
|
||||
ByzantineValidators: byzVals,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error in proxyAppConn.BeginBlock", "err", err)
|
||||
@@ -240,12 +225,56 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
|
||||
return abciResponses, nil
|
||||
}
|
||||
|
||||
func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]abci.SigningValidator, []abci.Evidence) {
|
||||
|
||||
// Sanity check that commit length matches validator set size -
|
||||
// only applies after first block
|
||||
if block.Height > 1 {
|
||||
precommitLen := len(block.LastCommit.Precommits)
|
||||
valSetLen := len(lastValSet.Validators)
|
||||
if precommitLen != valSetLen {
|
||||
// sanity check
|
||||
panic(fmt.Sprintf("precommit length (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
|
||||
precommitLen, valSetLen, block.Height, block.LastCommit.Precommits, lastValSet.Validators))
|
||||
}
|
||||
}
|
||||
|
||||
// determine which validators did not sign last block.
|
||||
signVals := make([]abci.SigningValidator, len(lastValSet.Validators))
|
||||
for i, val := range lastValSet.Validators {
|
||||
var vote *types.Vote
|
||||
if i < len(block.LastCommit.Precommits) {
|
||||
vote = block.LastCommit.Precommits[i]
|
||||
}
|
||||
val := abci.SigningValidator{
|
||||
Validator: types.TM2PB.Validator(val),
|
||||
SignedLastBlock: vote != nil,
|
||||
}
|
||||
signVals[i] = val
|
||||
}
|
||||
|
||||
byzVals := make([]abci.Evidence, len(block.Evidence.Evidence))
|
||||
for i, ev := range block.Evidence.Evidence {
|
||||
// We need the validator set. We already did this in validateBlock.
|
||||
// TODO: Should we instead cache the valset in the evidence itself and add
|
||||
// `SetValidatorSet()` and `ToABCI` methods ?
|
||||
valset, err := LoadValidators(stateDB, ev.Height())
|
||||
if err != nil {
|
||||
panic(err) // shouldn't happen
|
||||
}
|
||||
byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time)
|
||||
}
|
||||
|
||||
return signVals, byzVals
|
||||
|
||||
}
|
||||
|
||||
// If more or equal than 1/3 of total voting power changed in one block, then
|
||||
// a light client could never prove the transition externally. See
|
||||
// ./lite/doc.go for details on how a light client tracks validators.
|
||||
func updateValidators(currentSet *types.ValidatorSet, updates []abci.Validator) error {
|
||||
for _, v := range updates {
|
||||
pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-amino encoded pubkey
|
||||
pubkey, err := types.PB2TM.PubKey(v.PubKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -359,8 +388,9 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty
|
||||
|
||||
// ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state.
|
||||
// It returns the application root hash (result of abci.Commit).
|
||||
func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger) ([]byte, error) {
|
||||
_, err := execBlockOnProxyApp(logger, appConnConsensus, block)
|
||||
func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block,
|
||||
logger log.Logger, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]byte, error) {
|
||||
_, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB)
|
||||
if err != nil {
|
||||
logger.Error("Error executing block on proxy app", "height", block.Height, "err", err)
|
||||
return nil, err
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -18,7 +19,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
privKey = crypto.GenPrivKeyEd25519FromSecret([]byte("execution_test"))
|
||||
chainID = "execution_chain"
|
||||
testPartSize = 65536
|
||||
nTxsPerBlock = 10
|
||||
@@ -31,7 +31,7 @@ func TestApplyBlock(t *testing.T) {
|
||||
require.Nil(t, err)
|
||||
defer proxyApp.Stop()
|
||||
|
||||
state, stateDB := state(), dbm.NewMemDB()
|
||||
state, stateDB := state(1, 1)
|
||||
|
||||
blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(),
|
||||
types.MockMempool{}, types.MockEvidencePool{})
|
||||
@@ -45,8 +45,8 @@ func TestApplyBlock(t *testing.T) {
|
||||
// TODO check state and mempool
|
||||
}
|
||||
|
||||
// TestBeginBlockAbsentValidators ensures we send absent validators list.
|
||||
func TestBeginBlockAbsentValidators(t *testing.T) {
|
||||
// TestBeginBlockValidators ensures we send absent validators list.
|
||||
func TestBeginBlockValidators(t *testing.T) {
|
||||
app := &testApp{}
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
proxyApp := proxy.NewAppConns(cc, nil)
|
||||
@@ -54,32 +54,46 @@ func TestBeginBlockAbsentValidators(t *testing.T) {
|
||||
require.Nil(t, err)
|
||||
defer proxyApp.Stop()
|
||||
|
||||
state := state()
|
||||
state, stateDB := state(2, 2)
|
||||
|
||||
prevHash := state.LastBlockID.Hash
|
||||
prevParts := types.PartSetHeader{}
|
||||
prevBlockID := types.BlockID{prevHash, prevParts}
|
||||
|
||||
now := time.Now().UTC()
|
||||
vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}
|
||||
vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
lastCommitPrecommits []*types.Vote
|
||||
expectedAbsentValidators []int32
|
||||
expectedAbsentValidators []int
|
||||
}{
|
||||
{"none absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, {ValidatorIndex: 1, Timestamp: now}}, []int32{}},
|
||||
{"one absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, nil}, []int32{1}},
|
||||
{"multiple absent", []*types.Vote{nil, nil}, []int32{0, 1}},
|
||||
{"none absent", []*types.Vote{vote0, vote1}, []int{}},
|
||||
{"one absent", []*types.Vote{vote0, nil}, []int{1}},
|
||||
{"multiple absent", []*types.Vote{nil, nil}, []int{0, 1}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits}
|
||||
|
||||
// block for height 2
|
||||
block, _ := state.MakeBlock(2, makeTxs(2), lastCommit)
|
||||
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger())
|
||||
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB)
|
||||
require.Nil(t, err, tc.desc)
|
||||
|
||||
// -> app must receive an index of the absent validator
|
||||
assert.Equal(t, tc.expectedAbsentValidators, app.AbsentValidators, tc.desc)
|
||||
// -> app receives a list of validators with a bool indicating if they signed
|
||||
ctr := 0
|
||||
for i, v := range app.Validators {
|
||||
if ctr < len(tc.expectedAbsentValidators) &&
|
||||
tc.expectedAbsentValidators[ctr] == i {
|
||||
|
||||
assert.False(t, v.SignedLastBlock)
|
||||
ctr++
|
||||
} else {
|
||||
assert.True(t, v.SignedLastBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,35 +106,41 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
|
||||
require.Nil(t, err)
|
||||
defer proxyApp.Stop()
|
||||
|
||||
state := state()
|
||||
state, stateDB := state(2, 12)
|
||||
|
||||
prevHash := state.LastBlockID.Hash
|
||||
prevParts := types.PartSetHeader{}
|
||||
prevBlockID := types.BlockID{prevHash, prevParts}
|
||||
|
||||
height1, idx1, val1 := int64(8), 0, []byte("val1")
|
||||
height2, idx2, val2 := int64(3), 1, []byte("val2")
|
||||
height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address
|
||||
height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address
|
||||
ev1 := types.NewMockGoodEvidence(height1, idx1, val1)
|
||||
ev2 := types.NewMockGoodEvidence(height2, idx2, val2)
|
||||
|
||||
now := time.Now()
|
||||
valSet := state.Validators
|
||||
testCases := []struct {
|
||||
desc string
|
||||
evidence []types.Evidence
|
||||
expectedByzantineValidators []abci.Evidence
|
||||
}{
|
||||
{"none byzantine", []types.Evidence{}, []abci.Evidence{}},
|
||||
{"one byzantine", []types.Evidence{ev1}, []abci.Evidence{{ev1.Address(), ev1.Height()}}},
|
||||
{"one byzantine", []types.Evidence{ev1}, []abci.Evidence{types.TM2PB.Evidence(ev1, valSet, now)}},
|
||||
{"multiple byzantine", []types.Evidence{ev1, ev2}, []abci.Evidence{
|
||||
{ev1.Address(), ev1.Height()},
|
||||
{ev2.Address(), ev2.Height()}}},
|
||||
types.TM2PB.Evidence(ev1, valSet, now),
|
||||
types.TM2PB.Evidence(ev2, valSet, now)}},
|
||||
}
|
||||
|
||||
vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}
|
||||
vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now}
|
||||
votes := []*types.Vote{vote0, vote1}
|
||||
lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes}
|
||||
for _, tc := range testCases {
|
||||
lastCommit := &types.Commit{BlockID: prevBlockID}
|
||||
|
||||
block, _ := state.MakeBlock(10, makeTxs(2), lastCommit)
|
||||
block.Time = now
|
||||
block.Evidence.Evidence = tc.evidence
|
||||
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger())
|
||||
_, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB)
|
||||
require.Nil(t, err, tc.desc)
|
||||
|
||||
// -> app must receive an index of the byzantine validator
|
||||
@@ -138,15 +158,30 @@ func makeTxs(height int64) (txs []types.Tx) {
|
||||
return txs
|
||||
}
|
||||
|
||||
func state() State {
|
||||
func state(nVals, height int) (State, dbm.DB) {
|
||||
vals := make([]types.GenesisValidator, nVals)
|
||||
for i := 0; i < nVals; i++ {
|
||||
secret := []byte(fmt.Sprintf("test%d", i))
|
||||
pk := crypto.GenPrivKeyEd25519FromSecret(secret)
|
||||
vals[i] = types.GenesisValidator{
|
||||
pk.PubKey(), 1000, fmt.Sprintf("test%d", i),
|
||||
}
|
||||
}
|
||||
s, _ := MakeGenesisState(&types.GenesisDoc{
|
||||
ChainID: chainID,
|
||||
Validators: []types.GenesisValidator{
|
||||
{privKey.PubKey(), 10000, "test"},
|
||||
},
|
||||
AppHash: nil,
|
||||
ChainID: chainID,
|
||||
Validators: vals,
|
||||
AppHash: nil,
|
||||
})
|
||||
return s
|
||||
|
||||
// save validators to db for 2 heights
|
||||
stateDB := dbm.NewMemDB()
|
||||
SaveState(stateDB, s)
|
||||
|
||||
for i := 1; i < height; i++ {
|
||||
s.LastBlockHeight += 1
|
||||
SaveState(stateDB, s)
|
||||
}
|
||||
return s, stateDB
|
||||
}
|
||||
|
||||
func makeBlock(state State, height int64) *types.Block {
|
||||
@@ -161,7 +196,7 @@ var _ abci.Application = (*testApp)(nil)
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
|
||||
AbsentValidators []int32
|
||||
Validators []abci.SigningValidator
|
||||
ByzantineValidators []abci.Evidence
|
||||
}
|
||||
|
||||
@@ -174,7 +209,7 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
|
||||
}
|
||||
|
||||
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
|
||||
app.AbsentValidators = req.AbsentValidators
|
||||
app.Validators = req.Validators
|
||||
app.ByzantineValidators = req.ByzantineValidators
|
||||
return abci.ResponseBeginBlock{}
|
||||
}
|
||||
|
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
abci "github.com/tendermint/abci/types"
|
||||
"github.com/tendermint/go-crypto"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
|
||||
@@ -78,10 +78,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
|
||||
abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil}
|
||||
abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil}
|
||||
abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{
|
||||
{
|
||||
PubKey: crypto.GenPrivKeyEd25519().PubKey().Bytes(),
|
||||
Power: 10,
|
||||
},
|
||||
types.TM2PB.ValidatorFromPubKeyAndPower(crypto.GenPrivKeyEd25519().PubKey(), 10),
|
||||
}}
|
||||
|
||||
saveABCIResponses(stateDB, block.Height, abciResponses)
|
||||
@@ -121,7 +118,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
|
||||
{Code: 383},
|
||||
{Data: []byte("Gotcha!"),
|
||||
Tags: []cmn.KVPair{
|
||||
cmn.KVPair{[]byte("a"), []byte{1}},
|
||||
cmn.KVPair{[]byte("a"), []byte("1")},
|
||||
cmn.KVPair{[]byte("build"), []byte("stuff")},
|
||||
}},
|
||||
},
|
||||
@@ -435,8 +432,8 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64,
|
||||
if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
|
||||
abciResponses.EndBlock = &abci.ResponseEndBlock{
|
||||
ValidatorUpdates: []abci.Validator{
|
||||
{val.PubKey.Bytes(), 0},
|
||||
{pubkey.Bytes(), 10},
|
||||
types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, 0),
|
||||
types.TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -457,7 +454,7 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64,
|
||||
if val.VotingPower != power {
|
||||
abciResponses.EndBlock = &abci.ResponseEndBlock{
|
||||
ValidatorUpdates: []abci.Validator{
|
||||
{val.PubKey.Bytes(), power},
|
||||
types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, power),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@@ -173,11 +173,12 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) {
|
||||
}
|
||||
|
||||
if valInfo.ValidatorSet == nil {
|
||||
valInfo = loadValidatorsInfo(db, valInfo.LastHeightChanged)
|
||||
if valInfo == nil {
|
||||
valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged)
|
||||
if valInfo2 == nil {
|
||||
cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as
|
||||
last changed from height %d`, valInfo.LastHeightChanged, height))
|
||||
}
|
||||
valInfo = valInfo2
|
||||
}
|
||||
|
||||
return valInfo.ValidatorSet, nil
|
||||
|
@@ -73,6 +73,9 @@ func validateBlock(stateDB dbm.DB, s State, b *types.Block) error {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Each check requires loading an old validator set.
|
||||
// We should cap the amount of evidence per block
|
||||
// to prevent potential proposer DoS.
|
||||
for _, ev := range b.Evidence.Evidence {
|
||||
if err := VerifyEvidence(stateDB, s, ev); err != nil {
|
||||
return types.NewEvidenceInvalidErr(ev, err)
|
||||
@@ -82,11 +85,11 @@ func validateBlock(stateDB dbm.DB, s State, b *types.Block) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX: What's cheaper (ie. what should be checked first):
|
||||
// evidence internal validity (ie. sig checks) or validator existed (fetch historical val set from db)
|
||||
|
||||
// VerifyEvidence verifies the evidence fully by checking it is internally
|
||||
// consistent and sufficiently recent.
|
||||
// VerifyEvidence verifies the evidence fully by checking:
|
||||
// - it is sufficiently recent (MaxAge)
|
||||
// - it is from a key who was a validator at the given height
|
||||
// - it is internally consistent
|
||||
// - it was properly signed by the alleged equivocator
|
||||
func VerifyEvidence(stateDB dbm.DB, s State, evidence types.Evidence) error {
|
||||
height := s.LastBlockHeight
|
||||
|
||||
@@ -97,10 +100,6 @@ func VerifyEvidence(stateDB dbm.DB, s State, evidence types.Evidence) error {
|
||||
evidence.Height(), height-maxAge)
|
||||
}
|
||||
|
||||
if err := evidence.Verify(s.ChainID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
valset, err := LoadValidators(stateDB, evidence.Height())
|
||||
if err != nil {
|
||||
// TODO: if err is just that we cant find it cuz we pruned, ignore.
|
||||
@@ -108,14 +107,18 @@ func VerifyEvidence(stateDB dbm.DB, s State, evidence types.Evidence) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// The address must have been an active validator at the height
|
||||
// The address must have been an active validator at the height.
|
||||
// NOTE: we will ignore evidence from H if the key was not a validator
|
||||
// at H, even if it is a validator at some nearby H'
|
||||
ev := evidence
|
||||
height, addr, idx := ev.Height(), ev.Address(), ev.Index()
|
||||
valIdx, val := valset.GetByAddress(addr)
|
||||
height, addr := ev.Height(), ev.Address()
|
||||
_, val := valset.GetByAddress(addr)
|
||||
if val == nil {
|
||||
return fmt.Errorf("Address %X was not a validator at height %d", addr, height)
|
||||
} else if idx != valIdx {
|
||||
return fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx)
|
||||
}
|
||||
|
||||
if err := evidence.Verify(s.ChainID, val.PubKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestValidateBlock(t *testing.T) {
|
||||
state := state()
|
||||
state, _ := state(1, 1)
|
||||
|
||||
blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil)
|
||||
|
||||
|
9
test/app/grpc_client.go
Executable file → Normal file
9
test/app/grpc_client.go
Executable file → Normal file
@@ -2,12 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"context"
|
||||
|
||||
"github.com/tendermint/go-wire"
|
||||
"github.com/tendermint/tendermint/rpc/grpc"
|
||||
)
|
||||
|
||||
@@ -32,5 +32,10 @@ func main() {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println(string(wire.JSONBytes(res)))
|
||||
bz, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println(string(bz))
|
||||
}
|
||||
|
@@ -67,7 +67,7 @@ func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error
|
||||
func (b *EventBus) Publish(eventType string, eventData TMEventData) error {
|
||||
// no explicit deadline for publishing events
|
||||
ctx := context.Background()
|
||||
b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]interface{}{EventTypeKey: eventType}))
|
||||
b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]string{EventTypeKey: eventType}))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -92,7 +92,7 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
|
||||
// no explicit deadline for publishing events
|
||||
ctx := context.Background()
|
||||
|
||||
tags := make(map[string]interface{})
|
||||
tags := make(map[string]string)
|
||||
|
||||
// validate and fill tags from tx result
|
||||
for _, tag := range event.Result.Tags {
|
||||
@@ -112,7 +112,7 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error {
|
||||
tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash())
|
||||
|
||||
logIfTagExists(TxHeightKey, tags, b.Logger)
|
||||
tags[TxHeightKey] = event.Height
|
||||
tags[TxHeightKey] = fmt.Sprintf("%d", event.Height)
|
||||
|
||||
b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags))
|
||||
return nil
|
||||
@@ -160,7 +160,7 @@ func (b *EventBus) PublishEventLock(event EventDataRoundState) error {
|
||||
return b.Publish(EventLock, event)
|
||||
}
|
||||
|
||||
func logIfTagExists(tag string, tags map[string]interface{}, logger log.Logger) {
|
||||
func logIfTagExists(tag string, tags map[string]string, logger log.Logger) {
|
||||
if value, ok := tags[tag]; ok {
|
||||
logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value)
|
||||
}
|
||||
|
@@ -23,12 +23,12 @@ func TestEventBusPublishEventTx(t *testing.T) {
|
||||
defer eventBus.Stop()
|
||||
|
||||
tx := Tx("foo")
|
||||
result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
|
||||
result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{[]byte("baz"), []byte("1")}}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}}
|
||||
|
||||
txEventsCh := make(chan interface{})
|
||||
|
||||
// PublishEventTx adds all these 3 tags, so the query below should work
|
||||
query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X'", tx.Hash())
|
||||
query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND baz=1", tx.Hash())
|
||||
err = eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query), txEventsCh)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@@ -28,12 +28,11 @@ func (err *ErrEvidenceInvalid) Error() string {
|
||||
|
||||
// Evidence represents any provable malicious activity by a validator
|
||||
type Evidence interface {
|
||||
Height() int64 // height of the equivocation
|
||||
Address() []byte // address of the equivocating validator
|
||||
Index() int // index of the validator in the validator set
|
||||
Hash() []byte // hash of the evidence
|
||||
Verify(chainID string) error // verify the evidence
|
||||
Equal(Evidence) bool // check equality of evidence
|
||||
Height() int64 // height of the equivocation
|
||||
Address() []byte // address of the equivocating validator
|
||||
Hash() []byte // hash of the evidence
|
||||
Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence
|
||||
Equal(Evidence) bool // check equality of evidence
|
||||
|
||||
String() string
|
||||
}
|
||||
@@ -68,11 +67,6 @@ func (dve *DuplicateVoteEvidence) Address() []byte {
|
||||
return dve.PubKey.Address()
|
||||
}
|
||||
|
||||
// Index returns the index of the validator.
|
||||
func (dve *DuplicateVoteEvidence) Index() int {
|
||||
return dve.VoteA.ValidatorIndex
|
||||
}
|
||||
|
||||
// Hash returns the hash of the evidence.
|
||||
func (dve *DuplicateVoteEvidence) Hash() []byte {
|
||||
return aminoHasher(dve).Hash()
|
||||
@@ -80,7 +74,7 @@ func (dve *DuplicateVoteEvidence) Hash() []byte {
|
||||
|
||||
// Verify returns an error if the two votes aren't conflicting.
|
||||
// To be conflicting, they must be from the same validator, for the same H/R/S, but for different blocks.
|
||||
func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
|
||||
func (dve *DuplicateVoteEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
|
||||
// H/R/S must be the same
|
||||
if dve.VoteA.Height != dve.VoteB.Height ||
|
||||
dve.VoteA.Round != dve.VoteB.Round ||
|
||||
@@ -92,7 +86,8 @@ func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
|
||||
if !bytes.Equal(dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) {
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error: Validator addresses do not match. Got %X and %X", dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress)
|
||||
}
|
||||
// XXX: Should we enforce index is the same ?
|
||||
|
||||
// Index must be the same
|
||||
if dve.VoteA.ValidatorIndex != dve.VoteB.ValidatorIndex {
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error: Validator indices do not match. Got %d and %d", dve.VoteA.ValidatorIndex, dve.VoteB.ValidatorIndex)
|
||||
}
|
||||
@@ -102,11 +97,18 @@ func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", dve.VoteA.BlockID)
|
||||
}
|
||||
|
||||
// pubkey must match address (this should already be true, sanity check)
|
||||
addr := dve.VoteA.ValidatorAddress
|
||||
if !bytes.Equal(pubKey.Address(), addr) {
|
||||
return fmt.Errorf("DuplicateVoteEvidence FAILED SANITY CHECK - address (%X) doesn't match pubkey (%v - %X)",
|
||||
addr, pubKey, pubKey.Address())
|
||||
}
|
||||
|
||||
// Signatures must be valid
|
||||
if !dve.PubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) {
|
||||
if !pubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) {
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature)
|
||||
}
|
||||
if !dve.PubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) {
|
||||
if !pubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) {
|
||||
return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature)
|
||||
}
|
||||
|
||||
@@ -131,29 +133,26 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
|
||||
type MockGoodEvidence struct {
|
||||
Height_ int64
|
||||
Address_ []byte
|
||||
Index_ int
|
||||
}
|
||||
|
||||
// UNSTABLE
|
||||
func NewMockGoodEvidence(height int64, index int, address []byte) MockGoodEvidence {
|
||||
return MockGoodEvidence{height, address, index}
|
||||
func NewMockGoodEvidence(height int64, idx int, address []byte) MockGoodEvidence {
|
||||
return MockGoodEvidence{height, address}
|
||||
}
|
||||
|
||||
func (e MockGoodEvidence) Height() int64 { return e.Height_ }
|
||||
func (e MockGoodEvidence) Address() []byte { return e.Address_ }
|
||||
func (e MockGoodEvidence) Index() int { return e.Index_ }
|
||||
func (e MockGoodEvidence) Hash() []byte {
|
||||
return []byte(fmt.Sprintf("%d-%d", e.Height_, e.Index_))
|
||||
return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_))
|
||||
}
|
||||
func (e MockGoodEvidence) Verify(chainID string) error { return nil }
|
||||
func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil }
|
||||
func (e MockGoodEvidence) Equal(ev Evidence) bool {
|
||||
e2 := ev.(MockGoodEvidence)
|
||||
return e.Height_ == e2.Height_ &&
|
||||
bytes.Equal(e.Address_, e2.Address_) &&
|
||||
e.Index_ == e2.Index_
|
||||
bytes.Equal(e.Address_, e2.Address_)
|
||||
}
|
||||
func (e MockGoodEvidence) String() string {
|
||||
return fmt.Sprintf("GoodEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
|
||||
return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_)
|
||||
}
|
||||
|
||||
// UNSTABLE
|
||||
@@ -161,15 +160,16 @@ type MockBadEvidence struct {
|
||||
MockGoodEvidence
|
||||
}
|
||||
|
||||
func (e MockBadEvidence) Verify(chainID string) error { return fmt.Errorf("MockBadEvidence") }
|
||||
func (e MockBadEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
|
||||
return fmt.Errorf("MockBadEvidence")
|
||||
}
|
||||
func (e MockBadEvidence) Equal(ev Evidence) bool {
|
||||
e2 := ev.(MockBadEvidence)
|
||||
return e.Height_ == e2.Height_ &&
|
||||
bytes.Equal(e.Address_, e2.Address_) &&
|
||||
e.Index_ == e2.Index_
|
||||
bytes.Equal(e.Address_, e2.Address_)
|
||||
}
|
||||
func (e MockBadEvidence) String() string {
|
||||
return fmt.Sprintf("BadEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
|
||||
return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_)
|
||||
}
|
||||
|
||||
//-------------------------------------------
|
||||
|
@@ -59,17 +59,16 @@ func TestEvidence(t *testing.T) {
|
||||
{vote1, badVote, false}, // signed by wrong key
|
||||
}
|
||||
|
||||
pubKey := val.GetPubKey()
|
||||
for _, c := range cases {
|
||||
ev := &DuplicateVoteEvidence{
|
||||
PubKey: val.GetPubKey(),
|
||||
VoteA: c.vote1,
|
||||
VoteB: c.vote2,
|
||||
VoteA: c.vote1,
|
||||
VoteB: c.vote2,
|
||||
}
|
||||
if c.valid {
|
||||
assert.Nil(t, ev.Verify(chainID), "evidence should be valid")
|
||||
assert.Nil(t, ev.Verify(chainID, pubKey), "evidence should be valid")
|
||||
} else {
|
||||
assert.NotNil(t, ev.Verify(chainID), "evidence should be invalid")
|
||||
assert.NotNil(t, ev.Verify(chainID, pubKey), "evidence should be invalid")
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -1,71 +1,166 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/tendermint/abci/types"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
abci "github.com/tendermint/abci/types"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
)
|
||||
|
||||
// TM2PB is used for converting Tendermint types to protobuf types.
|
||||
//-------------------------------------------------------
|
||||
// Use strings to distinguish types in ABCI messages
|
||||
|
||||
const (
|
||||
ABCIEvidenceTypeDuplicateVote = "duplicate/vote"
|
||||
ABCIEvidenceTypeMockGood = "mock/good"
|
||||
)
|
||||
|
||||
const (
|
||||
ABCIPubKeyTypeEd25519 = "ed25519"
|
||||
ABCIPubKeyTypeSecp256k1 = "secp256k1"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
// TM2PB is used for converting Tendermint ABCI to protobuf ABCI.
|
||||
// UNSTABLE
|
||||
var TM2PB = tm2pb{}
|
||||
|
||||
type tm2pb struct{}
|
||||
|
||||
func (tm2pb) Header(header *Header) types.Header {
|
||||
return types.Header{
|
||||
ChainID: header.ChainID,
|
||||
Height: header.Height,
|
||||
Time: header.Time.Unix(),
|
||||
NumTxs: int32(header.NumTxs), // XXX: overflow
|
||||
LastBlockID: TM2PB.BlockID(header.LastBlockID),
|
||||
LastCommitHash: header.LastCommitHash,
|
||||
DataHash: header.DataHash,
|
||||
AppHash: header.AppHash,
|
||||
func (tm2pb) Header(header *Header) abci.Header {
|
||||
return abci.Header{
|
||||
ChainID: header.ChainID,
|
||||
Height: header.Height,
|
||||
Time: header.Time.Unix(),
|
||||
NumTxs: int32(header.NumTxs), // XXX: overflow
|
||||
LastBlockHash: header.LastBlockID.Hash,
|
||||
AppHash: header.AppHash,
|
||||
}
|
||||
}
|
||||
|
||||
func (tm2pb) BlockID(blockID BlockID) types.BlockID {
|
||||
return types.BlockID{
|
||||
Hash: blockID.Hash,
|
||||
Parts: TM2PB.PartSetHeader(blockID.PartsHeader),
|
||||
}
|
||||
}
|
||||
|
||||
func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) types.PartSetHeader {
|
||||
return types.PartSetHeader{
|
||||
Total: int32(partSetHeader.Total), // XXX: overflow
|
||||
Hash: partSetHeader.Hash,
|
||||
}
|
||||
}
|
||||
|
||||
func (tm2pb) Validator(val *Validator) types.Validator {
|
||||
return types.Validator{
|
||||
PubKey: val.PubKey.Bytes(),
|
||||
func (tm2pb) Validator(val *Validator) abci.Validator {
|
||||
return abci.Validator{
|
||||
PubKey: TM2PB.PubKey(val.PubKey),
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
}
|
||||
|
||||
func (tm2pb) Validators(vals *ValidatorSet) []types.Validator {
|
||||
validators := make([]types.Validator, len(vals.Validators))
|
||||
func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
|
||||
switch pk := pubKey.(type) {
|
||||
case crypto.PubKeyEd25519:
|
||||
return abci.PubKey{
|
||||
Type: "ed25519",
|
||||
Data: pk[:],
|
||||
}
|
||||
case crypto.PubKeySecp256k1:
|
||||
return abci.PubKey{
|
||||
Type: "secp256k1",
|
||||
Data: pk[:],
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown pubkey type: %v %v", pubKey, reflect.TypeOf(pubKey)))
|
||||
}
|
||||
}
|
||||
|
||||
func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator {
|
||||
validators := make([]abci.Validator, len(vals.Validators))
|
||||
for i, val := range vals.Validators {
|
||||
validators[i] = TM2PB.Validator(val)
|
||||
}
|
||||
return validators
|
||||
}
|
||||
|
||||
func (tm2pb) ConsensusParams(params *ConsensusParams) *types.ConsensusParams {
|
||||
return &types.ConsensusParams{
|
||||
BlockSize: &types.BlockSize{
|
||||
func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams {
|
||||
return &abci.ConsensusParams{
|
||||
BlockSize: &abci.BlockSize{
|
||||
|
||||
MaxBytes: int32(params.BlockSize.MaxBytes),
|
||||
MaxTxs: int32(params.BlockSize.MaxTxs),
|
||||
MaxGas: params.BlockSize.MaxGas,
|
||||
},
|
||||
TxSize: &types.TxSize{
|
||||
TxSize: &abci.TxSize{
|
||||
MaxBytes: int32(params.TxSize.MaxBytes),
|
||||
MaxGas: params.TxSize.MaxGas,
|
||||
},
|
||||
BlockGossip: &types.BlockGossip{
|
||||
BlockGossip: &abci.BlockGossip{
|
||||
BlockPartSizeBytes: int32(params.BlockGossip.BlockPartSizeBytes),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ABCI Evidence includes information from the past that's not included in the evidence itself
|
||||
// so Evidence types stays compact.
|
||||
func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence {
|
||||
_, val := valSet.GetByAddress(ev.Address())
|
||||
if val == nil {
|
||||
// should already have checked this
|
||||
panic(val)
|
||||
}
|
||||
|
||||
abciEvidence := abci.Evidence{
|
||||
Validator: abci.Validator{
|
||||
Address: ev.Address(),
|
||||
PubKey: TM2PB.PubKey(val.PubKey),
|
||||
Power: val.VotingPower,
|
||||
},
|
||||
Height: ev.Height(),
|
||||
Time: evTime.Unix(),
|
||||
TotalVotingPower: valSet.TotalVotingPower(),
|
||||
}
|
||||
|
||||
// set type
|
||||
switch ev.(type) {
|
||||
case *DuplicateVoteEvidence:
|
||||
abciEvidence.Type = ABCIEvidenceTypeDuplicateVote
|
||||
case *MockGoodEvidence, MockGoodEvidence:
|
||||
abciEvidence.Type = ABCIEvidenceTypeMockGood
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown evidence type: %v %v", ev, reflect.TypeOf(ev)))
|
||||
}
|
||||
|
||||
return abciEvidence
|
||||
}
|
||||
|
||||
func (tm2pb) ValidatorFromPubKeyAndPower(pubkey crypto.PubKey, power int64) abci.Validator {
|
||||
pubkeyABCI := TM2PB.PubKey(pubkey)
|
||||
return abci.Validator{
|
||||
Address: pubkey.Address(),
|
||||
PubKey: pubkeyABCI,
|
||||
Power: power,
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------
|
||||
|
||||
// PB2TM is used for converting protobuf ABCI to Tendermint ABCI.
|
||||
// UNSTABLE
|
||||
var PB2TM = pb2tm{}
|
||||
|
||||
type pb2tm struct{}
|
||||
|
||||
func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) {
|
||||
// TODO: define these in go-crypto and use them
|
||||
sizeEd := 32
|
||||
sizeSecp := 33
|
||||
switch pubKey.Type {
|
||||
case ABCIPubKeyTypeEd25519:
|
||||
if len(pubKey.Data) != sizeEd {
|
||||
return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeEd)
|
||||
}
|
||||
var pk crypto.PubKeyEd25519
|
||||
copy(pk[:], pubKey.Data)
|
||||
return pk, nil
|
||||
case ABCIPubKeyTypeSecp256k1:
|
||||
if len(pubKey.Data) != sizeSecp {
|
||||
return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeSecp)
|
||||
}
|
||||
var pk crypto.PubKeySecp256k1
|
||||
copy(pk[:], pubKey.Data)
|
||||
return pk, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown pubkey type %v", pubKey.Type)
|
||||
}
|
||||
}
|
||||
|
@@ -4,13 +4,13 @@ package version
|
||||
const (
|
||||
Maj = "0"
|
||||
Min = "19"
|
||||
Fix = "6"
|
||||
Fix = "7"
|
||||
)
|
||||
|
||||
var (
|
||||
// Version is the current version of Tendermint
|
||||
// Must be a string because scripts like dist.sh read this file.
|
||||
Version = "0.19.6-dev"
|
||||
Version = "0.19.7-dev"
|
||||
|
||||
// GitCommit is the current HEAD set using ldflags.
|
||||
GitCommit string
|
||||
|
Reference in New Issue
Block a user