Merge pull request #1474 from tendermint/release/v0.19.0

Release/v0.19.0
Commit a2930cd723 by Ethan Buchman, 2018-04-17 10:59:36 +02:00 (committed by GitHub)
196 changed files with 3884 additions and 4297 deletions


@ -24,6 +24,24 @@ BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
## 0.19.0 (April 13th, 2018)
BREAKING:
- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details)
- [cmd] `show_node_id` now returns an error if there is no node key
- [rpc]: changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status)
Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is
serialized to disk or over the network.
See github.com/tendermint/go-amino for details on the new format.
See `scripts/wire2amino.go` for a tool to upgrade
genesis/priv_validator/node_key JSON files.
FEATURES:
- [cmd] added `gen_node_key` command
## 0.18.0 (April 6th, 2018)
BREAKING:

Gopkg.lock (generated), 40 changed lines

@ -159,7 +159,7 @@
branch = "master" branch = "master"
name = "github.com/rcrowley/go-metrics" name = "github.com/rcrowley/go-metrics"
packages = ["."] packages = ["."]
revision = "8732c616f52954686704c8645fe1a9d59e9df7c1" revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
[[projects]] [[projects]]
name = "github.com/spf13/afero" name = "github.com/spf13/afero"
@ -191,8 +191,8 @@
[[projects]] [[projects]]
name = "github.com/spf13/pflag" name = "github.com/spf13/pflag"
packages = ["."] packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.0" version = "v1.0.1"
[[projects]] [[projects]]
name = "github.com/spf13/viper" name = "github.com/spf13/viper"
@ -238,8 +238,8 @@
"server", "server",
"types" "types"
] ]
revision = "46686763ba8ea595ede16530ed4a40fb38f49f94" revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
version = "v0.10.2" version = "v0.10.3"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -251,20 +251,22 @@
] ]
revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
[[projects]]
name = "github.com/tendermint/go-amino"
packages = ["."]
revision = "42246108ff925a457fb709475070a03dfd3e2b5c"
version = "0.9.6"
[[projects]] [[projects]]
name = "github.com/tendermint/go-crypto" name = "github.com/tendermint/go-crypto"
packages = ["."] packages = ["."]
revision = "c3e19f3ea26f5c3357e0bcbb799b0761ef923755" revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
version = "v0.5.0" version = "v0.6.2"
[[projects]] [[projects]]
name = "github.com/tendermint/go-wire" name = "github.com/tendermint/go-wire"
packages = [ packages = ["."]
".",
"data"
]
revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c" revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
source = "github.com/tendermint/go-amino"
version = "v0.7.3" version = "v0.7.3"
[[projects]] [[projects]]
@ -283,8 +285,8 @@
"pubsub/query", "pubsub/query",
"test" "test"
] ]
revision = "2e24b64fc121dcdf1cabceab8dc2f7257675483c" revision = "97e1f1ad3f510048929a51475811a18686c894df"
version = "0.8.1" version = "0.8.2-rc0"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -299,7 +301,7 @@
"ripemd160", "ripemd160",
"salsa20/salsa" "salsa20/salsa"
] ]
revision = "b2aa35443fbc700ab74c586ae79b81c171851023" revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -313,13 +315,13 @@
"lex/httplex", "lex/httplex",
"trace" "trace"
] ]
revision = "b3c676e531a6dc479fa1b35ac961c13f5e2b4d2e" revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = ["unix"]
revision = "1d206c9fa8975fb4cf00df1dc8bf3283dc24ba0e" revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
[[projects]] [[projects]]
name = "golang.org/x/text" name = "golang.org/x/text"
@ -346,7 +348,7 @@
branch = "master" branch = "master"
name = "google.golang.org/genproto" name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"] packages = ["googleapis/rpc/status"]
revision = "35de2414665fc36f56b72d982c5af480d86de5ab" revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
@ -381,6 +383,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "b7c02a311569ec5fe2197614444fb231ea60f3e65a11a20e318421f1752054d7" inputs-digest = "e70f8692c825e80ae8510546e297840b9560d00e11b2272749a55cc2ffd147f0"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1


@ -26,12 +26,12 @@
[[constraint]] [[constraint]]
branch = "master"
name = "github.com/ebuchman/fail-test" name = "github.com/ebuchman/fail-test"
branch = "master"
[[constraint]] [[constraint]]
branch = "master"
name = "github.com/fortytw2/leaktest" name = "github.com/fortytw2/leaktest"
branch = "master"
[[constraint]] [[constraint]]
name = "github.com/go-kit/kit" name = "github.com/go-kit/kit"
@ -54,8 +54,8 @@
version = "~0.8.0" version = "~0.8.0"
[[constraint]] [[constraint]]
branch = "master"
name = "github.com/rcrowley/go-metrics" name = "github.com/rcrowley/go-metrics"
branch = "master"
[[constraint]] [[constraint]]
name = "github.com/spf13/cobra" name = "github.com/spf13/cobra"
@ -71,22 +71,19 @@
[[constraint]] [[constraint]]
name = "github.com/tendermint/abci" name = "github.com/tendermint/abci"
version = "~0.10.2" version = "~0.10.3"
[[constraint]] [[constraint]]
name = "github.com/tendermint/go-crypto" name = "github.com/tendermint/go-crypto"
version = "~0.5.0" version = "~0.6.2"
[[constraint]] [[constraint]]
name = "github.com/tendermint/go-wire" name = "github.com/tendermint/go-amino"
source = "github.com/tendermint/go-amino" version = "~0.9.6"
version = "~0.7.3"
[[override]] [[constraint]]
# [[constraint]]
name = "github.com/tendermint/tmlibs" name = "github.com/tendermint/tmlibs"
version = "~0.8.1" version = "~0.8.2-rc0"
# branch = "develop"
[[constraint]] [[constraint]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"


@ -178,7 +178,25 @@ metalinter_all:
@echo "--> Running linter (all)" @echo "--> Running linter (all)"
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
###########################################################
### Local testnet using docker
# Build linux binary on other platforms
build-linux:
GOOS=linux GOARCH=amd64 $(MAKE) build
# Run a 4-node testnet locally
docker-start:
@echo "Wait until 'Attaching to node0, node1, node2, node3' message appears"
@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v `pwd`/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
docker-compose up
# Stop testnet
docker-stop:
docker-compose down
# To avoid unintended conflicts with file names, always add to .PHONY # To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to. # unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt .PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux docker-start docker-stop


@ -4,8 +4,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
"github.com/tendermint/go-wire"
proto "github.com/tendermint/tendermint/benchmarks/proto" proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
@ -14,26 +14,35 @@ import (
func BenchmarkEncodeStatusWire(b *testing.B) { func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey() cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
status := &ctypes.ResultStatus{ status := &ctypes.ResultStatus{
NodeInfo: p2p.NodeInfo{ NodeInfo: p2p.NodeInfo{
PubKey: pubKey, ID: nodeKey.ID(),
Moniker: "SOMENAME", Moniker: "SOMENAME",
Network: "SOMENAME", Network: "SOMENAME",
ListenAddr: "SOMEADDR", ListenAddr: "SOMEADDR",
Version: "SOMEVER", Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"}, Other: []string{"SOMESTRING", "OTHERSTRING"},
}, },
PubKey: pubKey, SyncInfo: ctypes.SyncInfo{
LatestBlockHash: []byte("SOMEBYTES"), LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123, LatestBlockHeight: 123,
LatestBlockTime: time.Unix(0, 1234), LatestBlockTime: time.Unix(0, 1234),
},
ValidatorInfo: ctypes.ValidatorInfo{
PubKey: nodeKey.PubKey(),
},
} }
b.StartTimer() b.StartTimer()
counter := 0 counter := 0
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
jsonBytes := wire.JSONBytes(status) jsonBytes, err := cdc.MarshalJSON(status)
if err != nil {
panic(err)
}
counter += len(jsonBytes) counter += len(jsonBytes)
} }
@ -41,9 +50,11 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
func BenchmarkEncodeNodeInfoWire(b *testing.B) { func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey() cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{ nodeInfo := p2p.NodeInfo{
PubKey: pubKey, ID: nodeKey.ID(),
Moniker: "SOMENAME", Moniker: "SOMENAME",
Network: "SOMENAME", Network: "SOMENAME",
ListenAddr: "SOMEADDR", ListenAddr: "SOMEADDR",
@ -54,16 +65,21 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
counter := 0 counter := 0
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
jsonBytes := wire.JSONBytes(nodeInfo) jsonBytes, err := cdc.MarshalJSON(nodeInfo)
if err != nil {
panic(err)
}
counter += len(jsonBytes) counter += len(jsonBytes)
} }
} }
func BenchmarkEncodeNodeInfoBinary(b *testing.B) { func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey() cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{ nodeInfo := p2p.NodeInfo{
PubKey: pubKey, ID: nodeKey.ID(),
Moniker: "SOMENAME", Moniker: "SOMENAME",
Network: "SOMENAME", Network: "SOMENAME",
ListenAddr: "SOMEADDR", ListenAddr: "SOMEADDR",
@ -74,7 +90,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
counter := 0 counter := 0
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
jsonBytes := wire.BinaryBytes(nodeInfo) jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo)
counter += len(jsonBytes) counter += len(jsonBytes)
} }
@ -82,15 +98,20 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) { func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519) nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
pubKey2 := &proto.PubKey{Ed25519: &proto.PubKeyEd25519{Bytes: pubKey[:]}} nodeID := string(nodeKey.ID())
someName := "SOMENAME"
someAddr := "SOMEADDR"
someVer := "SOMEVER"
someString := "SOMESTRING"
otherString := "OTHERSTRING"
nodeInfo := proto.NodeInfo{ nodeInfo := proto.NodeInfo{
PubKey: pubKey2, Id: &proto.ID{Id: &nodeID},
Moniker: "SOMENAME", Moniker: &someName,
Network: "SOMENAME", Network: &someName,
ListenAddr: "SOMEADDR", ListenAddr: &someAddr,
Version: "SOMEVER", Version: &someVer,
Other: []string{"SOMESTRING", "OTHERSTRING"}, Other: []string{someString, otherString},
} }
b.StartTimer() b.StartTimer()

File diff suppressed because it is too large.


@ -7,7 +7,7 @@ message ResultStatus {
} }
message NodeInfo { message NodeInfo {
required PubKey pubKey = 1; required ID id = 1;
required string moniker = 2; required string moniker = 2;
required string network = 3; required string network = 3;
required string remoteAddr = 4; required string remoteAddr = 4;
@ -16,6 +16,10 @@ message NodeInfo {
repeated string other = 7; repeated string other = 7;
} }
message ID {
required string id = 1;
}
message PubKey { message PubKey {
optional PubKeyEd25519 ed25519 = 1; optional PubKeyEd25519 ed25519 = 1;
} }


@ -1,21 +1,16 @@
package blockchain package blockchain
import ( import (
"bytes"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"sync"
"time" "time"
wire "github.com/tendermint/go-wire" "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
) )
const ( const (
@ -31,6 +26,13 @@ const (
statusUpdateIntervalSeconds = 10 statusUpdateIntervalSeconds = 10
// check if we should switch to consensus reactor // check if we should switch to consensus reactor
switchToConsensusIntervalSeconds = 1 switchToConsensusIntervalSeconds = 1
// NOTE: keep up to date with bcBlockResponseMessage
bcBlockResponseMessagePrefixSize = 4
bcBlockResponseMessageFieldKeySize = 1
maxMsgSize = types.MaxBlockSizeBytes +
bcBlockResponseMessagePrefixSize +
bcBlockResponseMessageFieldKeySize
) )
type consensusReactor interface { type consensusReactor interface {
@ -52,9 +54,6 @@ func (e peerError) Error() string {
type BlockchainReactor struct { type BlockchainReactor struct {
p2p.BaseReactor p2p.BaseReactor
mtx sync.Mutex
params types.ConsensusParams
// immutable // immutable
initialState sm.State initialState sm.State
@ -87,7 +86,6 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
) )
bcR := &BlockchainReactor{ bcR := &BlockchainReactor{
params: state.ConsensusParams,
initialState: state, initialState: state,
blockExec: blockExec, blockExec: blockExec,
store: store, store: store,
@ -131,17 +129,19 @@ func (bcR *BlockchainReactor) OnStop() {
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{ return []*p2p.ChannelDescriptor{
{ {
ID: BlockchainChannel, ID: BlockchainChannel,
Priority: 10, Priority: 10,
SendQueueCapacity: 1000, SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
}, },
} }
} }
// AddPeer implements Reactor by sending our state to peer. // AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
if !peer.Send(BlockchainChannel, msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) { if !peer.Send(BlockchainChannel, msgBytes) {
// doing nothing, will try later in `poolRoutine` // doing nothing, will try later in `poolRoutine`
} }
// peer is added to the pool once we receive the first // peer is added to the pool once we receive the first
@ -162,20 +162,19 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
block := bcR.store.LoadBlock(msg.Height) block := bcR.store.LoadBlock(msg.Height)
if block != nil { if block != nil {
msg := &bcBlockResponseMessage{Block: block} msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}) return src.TrySend(BlockchainChannel, msgBytes)
} }
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{ msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
&bcNoBlockResponseMessage{Height: msg.Height}, return src.TrySend(BlockchainChannel, msgBytes)
})
} }
// Receive implements Reactor by handling 4 types of messages (look below). // Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize()) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
bcR.Switch.StopPeerForError(src, err) bcR.Switch.StopPeerForError(src, err)
@ -194,8 +193,8 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
case *bcStatusRequestMessage: case *bcStatusRequestMessage:
// Send peer our state. // Send peer our state.
queued := src.TrySend(BlockchainChannel, msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) queued := src.TrySend(BlockchainChannel, msgBytes)
if !queued { if !queued {
// sorry // sorry
} }
@ -207,21 +206,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
} }
} }
// maxMsgSize returns the maximum allowable size of a
// message on the blockchain reactor.
func (bcR *BlockchainReactor) maxMsgSize() int {
bcR.mtx.Lock()
defer bcR.mtx.Unlock()
return bcR.params.BlockSize.MaxBytes + 2
}
// updateConsensusParams updates the internal consensus params
func (bcR *BlockchainReactor) updateConsensusParams(params types.ConsensusParams) {
bcR.mtx.Lock()
defer bcR.mtx.Unlock()
bcR.params = params
}
// Handle messages from the poolReactor telling the reactor what to do. // Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.) // (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
@ -247,8 +231,8 @@ FOR_LOOP:
if peer == nil { if peer == nil {
continue FOR_LOOP // Peer has since been disconnected. continue FOR_LOOP // Peer has since been disconnected.
} }
msg := &bcBlockRequestMessage{request.Height} msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
queued := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}) queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued { if !queued {
// We couldn't make the request, send-queue full. // We couldn't make the request, send-queue full.
// The pool handles timeouts, just let it go. // The pool handles timeouts, just let it go.
@ -321,9 +305,6 @@ FOR_LOOP:
} }
blocksSynced++ blocksSynced++
// update the consensus params
bcR.updateConsensusParams(state.ConsensusParams)
if blocksSynced%100 == 0 { if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
@ -341,43 +322,36 @@ FOR_LOOP:
// BroadcastStatusRequest broadcasts `BlockStore` height. // BroadcastStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error { func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
bcR.Switch.Broadcast(BlockchainChannel, msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}}) bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
return nil return nil
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeBlockRequest = byte(0x10)
msgTypeBlockResponse = byte(0x11)
msgTypeNoBlockResponse = byte(0x12)
msgTypeStatusResponse = byte(0x20)
msgTypeStatusRequest = byte(0x21)
)
// BlockchainMessage is a generic message for this reactor. // BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{} type BlockchainMessage interface{}
var _ = wire.RegisterInterface( func RegisterBlockchainMessages(cdc *amino.Codec) {
struct{ BlockchainMessage }{}, cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
wire.ConcreteType{&bcBlockRequestMessage{}, msgTypeBlockRequest}, cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
wire.ConcreteType{&bcBlockResponseMessage{}, msgTypeBlockResponse}, cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
wire.ConcreteType{&bcNoBlockResponseMessage{}, msgTypeNoBlockResponse}, cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
wire.ConcreteType{&bcStatusResponseMessage{}, msgTypeStatusResponse}, cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest}, cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
) }
// DecodeMessage decodes BlockchainMessage. // DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read. // TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, err error) { func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := int(0) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage }
if err != nil && n != len(bz) { err = cdc.UnmarshalBinaryBare(bz, &msg)
err = errors.New("DecodeMessage() had bytes left over") if err != nil {
err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
} }
return return
} }
@ -402,7 +376,6 @@ func (brm *bcNoBlockResponseMessage) String() string {
//------------------------------------- //-------------------------------------
// NOTE: keep up-to-date with maxBlockchainResponseSize
type bcBlockResponseMessage struct { type bcBlockResponseMessage struct {
Block *types.Block Block *types.Block
} }


@ -3,8 +3,6 @@ package blockchain
import ( import (
"testing" "testing"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -18,8 +16,15 @@ import (
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
config := cfg.ResetTestRoot("blockchain_reactor_test") config := cfg.ResetTestRoot("blockchain_reactor_test")
blockStore := NewBlockStore(dbm.NewMemDB()) // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
state, _ := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
blockStore := NewBlockStore(blockDB)
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
if err != nil {
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
}
return state, blockStore return state, blockStore
} }
@ -76,10 +81,9 @@ func TestNoBlockResponse(t *testing.T) {
// wait for our response to be received on the peer // wait for our response to be received on the peer
for _, tt := range tests { for _, tt := range tests {
reqBlockMsg := &bcBlockRequestMessage{tt.height} reqBlockMsg := &bcBlockRequestMessage{tt.height}
reqBlockBytes := wire.BinaryBytes(struct{ BlockchainMessage }{reqBlockMsg}) reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg)
bcr.Receive(chID, peer, reqBlockBytes) bcr.Receive(chID, peer, reqBlockBytes)
value := peer.lastValue() msg := peer.lastBlockchainMessage()
msg := value.(struct{ BlockchainMessage }).BlockchainMessage
if tt.existent { if tt.existent {
if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok { if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok {
@ -173,26 +177,30 @@ func newbcrTestPeer(id p2p.ID) *bcrTestPeer {
return bcr return bcr
} }
func (tp *bcrTestPeer) lastValue() interface{} { return <-tp.ch } func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch }
func (tp *bcrTestPeer) TrySend(chID byte, value interface{}) bool { func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
if _, ok := value.(struct{ BlockchainMessage }). var msg BlockchainMessage
BlockchainMessage.(*bcStatusResponseMessage); ok { err := cdc.UnmarshalBinaryBare(msgBytes, &msg)
if err != nil {
panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage"))
}
if _, ok := msg.(*bcStatusResponseMessage); ok {
// Discard status response messages since they skew our results // Discard status response messages since they skew our results
// We only want to deal with: // We only want to deal with:
// + bcBlockResponseMessage // + bcBlockResponseMessage
// + bcNoBlockResponseMessage // + bcNoBlockResponseMessage
} else { } else {
tp.ch <- value tp.ch <- msg
} }
return true return true
} }
func (tp *bcrTestPeer) Send(chID byte, data interface{}) bool { return tp.TrySend(chID, data) } func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} } func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
func (tp *bcrTestPeer) ID() p2p.ID { return tp.id } func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
func (tp *bcrTestPeer) IsOutbound() bool { return false } func (tp *bcrTestPeer) IsOutbound() bool { return false }
func (tp *bcrTestPeer) IsPersistent() bool { return true } func (tp *bcrTestPeer) IsPersistent() bool { return true }
func (tp *bcrTestPeer) Get(s string) interface{} { return s } func (tp *bcrTestPeer) Get(s string) interface{} { return s }
func (tp *bcrTestPeer) Set(string, interface{}) {} func (tp *bcrTestPeer) Set(string, interface{}) {}


@ -1,14 +1,9 @@
package blockchain package blockchain
import ( import (
"bytes"
"encoding/json"
"fmt" "fmt"
"io"
"sync" "sync"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
@ -54,38 +49,25 @@ func (bs *BlockStore) Height() int64 {
return bs.height return bs.height
} }
// GetReader returns the value associated with the given key wrapped in an io.Reader.
// If no value is found, it returns nil.
// It's mainly for use with wire.ReadBinary.
func (bs *BlockStore) GetReader(key []byte) io.Reader {
bytez := bs.db.Get(key)
if bytez == nil {
return nil
}
return bytes.NewReader(bytez)
}
// LoadBlock returns the block with the given height. // LoadBlock returns the block with the given height.
// If no block is found for that height, it returns nil. // If no block is found for that height, it returns nil.
func (bs *BlockStore) LoadBlock(height int64) *types.Block { func (bs *BlockStore) LoadBlock(height int64) *types.Block {
var n int var blockMeta = bs.LoadBlockMeta(height)
var err error if blockMeta == nil {
r := bs.GetReader(calcBlockMetaKey(height))
if r == nil {
return nil return nil
} }
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil { var block = new(types.Block)
panic(fmt.Sprintf("Error reading block meta: %v", err)) buf := []byte{}
}
bytez := []byte{}
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ { for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
part := bs.LoadBlockPart(height, i) part := bs.LoadBlockPart(height, i)
bytez = append(bytez, part.Bytes...) buf = append(buf, part.Bytes...)
} }
block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block) err := cdc.UnmarshalBinary(buf, block)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading block: %v", err)) // NOTE: The existence of meta should imply the existence of the
// block. So, make sure meta is only saved after blocks are saved.
panic(cmn.ErrorWrap(err, "Error reading block"))
} }
return block return block
} }
@ -94,15 +76,14 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
// from the block at the given height. // from the block at the given height.
// If no part is found for the given height and index, it returns nil. // If no part is found for the given height and index, it returns nil.
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
var n int var part = new(types.Part)
var err error bz := bs.db.Get(calcBlockPartKey(height, index))
r := bs.GetReader(calcBlockPartKey(height, index)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part) err := cdc.UnmarshalBinaryBare(bz, part)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading block part: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block part"))
} }
return part return part
} }
@ -110,15 +91,14 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
// LoadBlockMeta returns the BlockMeta for the given height. // LoadBlockMeta returns the BlockMeta for the given height.
// If no block is found for the given height, it returns nil. // If no block is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
var n int var blockMeta = new(types.BlockMeta)
var err error bz := bs.db.Get(calcBlockMetaKey(height))
r := bs.GetReader(calcBlockMetaKey(height)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta) err := cdc.UnmarshalBinaryBare(bz, blockMeta)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading block meta: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block meta"))
} }
return blockMeta return blockMeta
} }
@ -128,15 +108,14 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
// and it comes from the block.LastCommit for `height+1`. // and it comes from the block.LastCommit for `height+1`.
// If no commit is found for the given height, it returns nil. // If no commit is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
var n int var commit = new(types.Commit)
var err error bz := bs.db.Get(calcBlockCommitKey(height))
r := bs.GetReader(calcBlockCommitKey(height)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit) err := cdc.UnmarshalBinaryBare(bz, commit)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading commit: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block commit"))
} }
return commit return commit
} }
@ -145,15 +124,14 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
// This is useful when we've seen a commit, but there has not yet been // This is useful when we've seen a commit, but there has not yet been
// a new block at `height + 1` that includes this commit in its block.LastCommit. // a new block at `height + 1` that includes this commit in its block.LastCommit.
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
var n int var commit = new(types.Commit)
var err error bz := bs.db.Get(calcSeenCommitKey(height))
r := bs.GetReader(calcSeenCommitKey(height)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit) err := cdc.UnmarshalBinaryBare(bz, commit)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading commit: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block seen commit"))
} }
return commit return commit
} }
@ -178,21 +156,22 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
// Save block meta // Save block meta
blockMeta := types.NewBlockMeta(block, blockParts) blockMeta := types.NewBlockMeta(block, blockParts)
metaBytes := wire.BinaryBytes(blockMeta) metaBytes := cdc.MustMarshalBinaryBare(blockMeta)
bs.db.Set(calcBlockMetaKey(height), metaBytes) bs.db.Set(calcBlockMetaKey(height), metaBytes)
// Save block parts // Save block parts
for i := 0; i < blockParts.Total(); i++ { for i := 0; i < blockParts.Total(); i++ {
bs.saveBlockPart(height, i, blockParts.GetPart(i)) part := blockParts.GetPart(i)
bs.saveBlockPart(height, i, part)
} }
// Save block commit (duplicate and separate from the Block) // Save block commit (duplicate and separate from the Block)
blockCommitBytes := wire.BinaryBytes(block.LastCommit) blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit)
bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes) bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)
// Save seen commit (seen +2/3 precommits for block) // Save seen commit (seen +2/3 precommits for block)
// NOTE: we can delete this at a later height // NOTE: we can delete this at a later height
seenCommitBytes := wire.BinaryBytes(seenCommit) seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit)
bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
// Save new BlockStoreStateJSON descriptor // Save new BlockStoreStateJSON descriptor
@ -211,7 +190,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 { if height != bs.Height()+1 {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
} }
partBytes := wire.BinaryBytes(part) partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes) bs.db.Set(calcBlockPartKey(height, index), partBytes)
} }
@ -238,12 +217,12 @@ func calcSeenCommitKey(height int64) []byte {
var blockStoreKey = []byte("blockStore") var blockStoreKey = []byte("blockStore")
type BlockStoreStateJSON struct { type BlockStoreStateJSON struct {
Height int64 Height int64 `json:"height"`
} }
// Save persists the blockStore state to the database as JSON. // Save persists the blockStore state to the database as JSON.
func (bsj BlockStoreStateJSON) Save(db dbm.DB) { func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := json.Marshal(bsj) bytes, err := cdc.MarshalJSON(bsj)
if err != nil { if err != nil {
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err)) cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
} }
@ -260,7 +239,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
} }
} }
bsj := BlockStoreStateJSON{} bsj := BlockStoreStateJSON{}
err := json.Unmarshal(bytes, &bsj) err := cdc.UnmarshalJSON(bytes, &bsj)
if err != nil { if err != nil {
panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
} }


@ -3,7 +3,6 @@ package blockchain
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io/ioutil"
"runtime/debug" "runtime/debug"
"strings" "strings"
"testing" "testing"
@ -11,9 +10,6 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -35,7 +31,7 @@ func TestNewBlockStore(t *testing.T) {
db := db.NewMemDB() db := db.NewMemDB()
db.Set(blockStoreKey, []byte(`{"height": 10000}`)) db.Set(blockStoreKey, []byte(`{"height": 10000}`))
bs := NewBlockStore(db) bs := NewBlockStore(db)
assert.Equal(t, bs.Height(), int64(10000), "failed to properly parse blockstore") require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")
panicCausers := []struct { panicCausers := []struct {
data []byte data []byte
@ -61,38 +57,6 @@ func TestNewBlockStore(t *testing.T) {
assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright") assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright")
} }
func TestBlockStoreGetReader(t *testing.T) {
db := db.NewMemDB()
// Initial setup
db.Set([]byte("Foo"), []byte("Bar"))
db.Set([]byte("Foo1"), nil)
bs := NewBlockStore(db)
tests := [...]struct {
key []byte
want []byte
}{
0: {key: []byte("Foo"), want: []byte("Bar")},
1: {key: []byte("KnoxNonExistent"), want: nil},
2: {key: []byte("Foo1"), want: []byte{}},
}
for i, tt := range tests {
r := bs.GetReader(tt.key)
if r == nil {
assert.Nil(t, tt.want, "#%d: expected a non-nil reader", i)
continue
}
slurp, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("#%d: unexpected Read err: %v", i, err)
} else {
assert.Equal(t, slurp, tt.want, "#%d: mismatch", i)
}
}
}
func freshBlockStore() (*BlockStore, db.DB) { func freshBlockStore() (*BlockStore, db.DB) {
db := db.NewMemDB() db := db.NewMemDB()
return NewBlockStore(db), db return NewBlockStore(db), db
@ -189,14 +153,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
parts: validPartSet, parts: validPartSet,
seenCommit: seenCommit1, seenCommit: seenCommit1,
corruptCommitInDB: true, // Corrupt the DB's commit entry corruptCommitInDB: true, // Corrupt the DB's commit entry
wantPanic: "rror reading commit", wantPanic: "Error reading block commit",
}, },
{ {
block: newBlock(&header1, commitAtH10), block: newBlock(&header1, commitAtH10),
parts: validPartSet, parts: validPartSet,
seenCommit: seenCommit1, seenCommit: seenCommit1,
wantPanic: "rror reading block", wantPanic: "Error reading block",
corruptBlockInDB: true, // Corrupt the DB's block entry corruptBlockInDB: true, // Corrupt the DB's block entry
}, },
@ -215,7 +179,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
seenCommit: seenCommit1, seenCommit: seenCommit1,
corruptSeenCommitInDB: true, corruptSeenCommitInDB: true,
wantPanic: "rror reading commit", wantPanic: "Error reading block seen commit",
}, },
{ {
@ -305,14 +269,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
} }
} }
func binarySerializeIt(v interface{}) []byte {
var n int
var err error
buf := new(bytes.Buffer)
wire.WriteBinary(v, buf, &n, &err)
return buf.Bytes()
}
func TestLoadBlockPart(t *testing.T) { func TestLoadBlockPart(t *testing.T) {
bs, db := freshBlockStore() bs, db := freshBlockStore()
height, index := int64(10), 1 height, index := int64(10), 1
@ -334,7 +290,7 @@ func TestLoadBlockPart(t *testing.T) {
require.Contains(t, panicErr.Error(), "Error reading block part") require.Contains(t, panicErr.Error(), "Error reading block part")
// 3. A good block serialized and saved to the DB should be retrievable // 3. A good block serialized and saved to the DB should be retrievable
db.Set(calcBlockPartKey(height, index), binarySerializeIt(part1)) db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
gotPart, _, panicErr := doFn(loadPart) gotPart, _, panicErr := doFn(loadPart)
require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved block should return a proper block") require.Nil(t, res, "a properly saved block should return a proper block")
@ -364,11 +320,11 @@ func TestLoadBlockMeta(t *testing.T) {
// 3. A good blockMeta serialized and saved to the DB should be retrievable // 3. A good blockMeta serialized and saved to the DB should be retrievable
meta := &types.BlockMeta{} meta := &types.BlockMeta{}
db.Set(calcBlockMetaKey(height), binarySerializeIt(meta)) db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta))
gotMeta, _, panicErr := doFn(loadMeta) gotMeta, _, panicErr := doFn(loadMeta)
require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ") require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ")
require.Equal(t, binarySerializeIt(meta), binarySerializeIt(gotMeta), require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta),
"expecting successful retrieval of previously saved blockMeta") "expecting successful retrieval of previously saved blockMeta")
} }
@ -385,6 +341,9 @@ func TestBlockFetchAtHeight(t *testing.T) {
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
blockAtHeight := bs.LoadBlock(bs.Height()) blockAtHeight := bs.LoadBlock(bs.Height())
bz1 := cdc.MustMarshalBinaryBare(block)
bz2 := cdc.MustMarshalBinaryBare(blockAtHeight)
require.Equal(t, bz1, bz2)
require.Equal(t, block.Hash(), blockAtHeight.Hash(), require.Equal(t, block.Hash(), blockAtHeight.Hash(),
"expecting a successful load of the last saved block") "expecting a successful load of the last saved block")

blockchain/wire.go (new file), 13 lines added

@ -0,0 +1,13 @@
package blockchain
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
RegisterBlockchainMessages(cdc)
crypto.RegisterAmino(cdc)
}
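With this codec in place, the reactor encodes outgoing messages with `cdc.MustMarshalBinaryBare` and parses incoming bytes with the new `DecodeMessage`, which also enforces `maxMsgSize`. A hypothetical in-package round trip, assuming the message types and `DecodeMessage` from `reactor.go` above (illustrative only, not part of the PR), would look like:

```go
package blockchain

import "testing"

// Sketch only: round-trips a status request through the codec registered
// in wire.go and the DecodeMessage helper from reactor.go.
func TestStatusRequestRoundTrip(t *testing.T) {
	bz := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{42})

	msg, err := DecodeMessage(bz)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := msg.(*bcStatusRequestMessage); !ok {
		t.Fatalf("unexpected message type %T", msg)
	}
}
```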


@ -30,7 +30,7 @@ func main() {
"privPath", *privValPath, "privPath", *privValPath,
) )
privVal := priv_val.LoadPrivValidatorJSON(*privValPath) privVal := priv_val.LoadFilePV(*privValPath)
rs := priv_val.NewRemoteSigner( rs := priv_val.NewRemoteSigner(
logger, logger,


@ -0,0 +1,32 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
cmn "github.com/tendermint/tmlibs/common"
)
// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
// the standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen_node_key",
Short: "Generate a node key for this node and print its ID",
RunE: genNodeKey,
}
func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKeyFile := config.NodeKeyFile()
if cmn.FileExists(nodeKeyFile) {
return fmt.Errorf("node key at %s already exists", nodeKeyFile)
}
nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
if err != nil {
return err
}
fmt.Println(nodeKey.ID())
return nil
}


@ -1,12 +1,11 @@
package commands package commands
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/tendermint/types" pvm "github.com/tendermint/tendermint/types/priv_validator"
) )
// GenValidatorCmd allows the generation of a keypair for a // GenValidatorCmd allows the generation of a keypair for a
@ -18,11 +17,11 @@ var GenValidatorCmd = &cobra.Command{
} }
func genValidator(cmd *cobra.Command, args []string) { func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidatorFS("") pv := pvm.GenFilePV("")
privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t") jsbz, err := cdc.MarshalJSON(pv)
if err != nil { if err != nil {
panic(err) panic(err)
} }
fmt.Printf(`%v fmt.Printf(`%v
`, string(privValidatorJSONBytes)) `, string(jsbz))
} }


@ -3,7 +3,10 @@ package commands
import ( import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
@ -11,22 +14,36 @@ import (
var InitFilesCmd = &cobra.Command{ var InitFilesCmd = &cobra.Command{
Use: "init", Use: "init",
Short: "Initialize Tendermint", Short: "Initialize Tendermint",
Run: initFiles, RunE: initFiles,
} }
func initFiles(cmd *cobra.Command, args []string) { func initFiles(cmd *cobra.Command, args []string) error {
return initFilesWithConfig(config)
}
func initFilesWithConfig(config *cfg.Config) error {
// private validator // private validator
privValFile := config.PrivValidatorFile() privValFile := config.PrivValidatorFile()
var privValidator *types.PrivValidatorFS var pv *pvm.FilePV
if cmn.FileExists(privValFile) { if cmn.FileExists(privValFile) {
privValidator = types.LoadPrivValidatorFS(privValFile) pv = pvm.LoadFilePV(privValFile)
logger.Info("Found private validator", "path", privValFile) logger.Info("Found private validator", "path", privValFile)
} else { } else {
privValidator = types.GenPrivValidatorFS(privValFile) pv = pvm.GenFilePV(privValFile)
privValidator.Save() pv.Save()
logger.Info("Generated private validator", "path", privValFile) logger.Info("Generated private validator", "path", privValFile)
} }
nodeKeyFile := config.NodeKeyFile()
if cmn.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)
}
// genesis file // genesis file
genFile := config.GenesisFile() genFile := config.GenesisFile()
if cmn.FileExists(genFile) { if cmn.FileExists(genFile) {
@ -36,13 +53,15 @@ func initFiles(cmd *cobra.Command, args []string) {
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
} }
genDoc.Validators = []types.GenesisValidator{{ genDoc.Validators = []types.GenesisValidator{{
PubKey: privValidator.GetPubKey(), PubKey: pv.GetPubKey(),
Power: 10, Power: 10,
}} }}
if err := genDoc.SaveAs(genFile); err != nil { if err := genDoc.SaveAs(genFile); err != nil {
panic(err) return err
} }
logger.Info("Generated genesis file", "path", genFile) logger.Info("Generated genesis file", "path", genFile)
} }
return nil
} }


@ -1,7 +1,6 @@
package commands package commands
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -22,7 +21,7 @@ func probeUpnp(cmd *cobra.Command, args []string) error {
fmt.Println("Probe failed: ", err) fmt.Println("Probe failed: ", err)
} else { } else {
fmt.Println("Probe success!") fmt.Println("Probe success!")
jsonBytes, err := json.Marshal(capabilities) jsonBytes, err := cdc.MarshalJSON(capabilities)
if err != nil { if err != nil {
return err return err
} }


@ -5,7 +5,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/tendermint/types" pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@ -27,7 +27,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
// ResetAll removes the privValidator files. // ResetAll removes the privValidator files.
// Exported so other CLI tools can use it. // Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) { func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorFS(privValFile, logger) resetFilePV(privValFile, logger)
if err := os.RemoveAll(dbDir); err != nil { if err := os.RemoveAll(dbDir); err != nil {
logger.Error("Error removing directory", "err", err) logger.Error("Error removing directory", "err", err)
return return
@ -44,18 +44,18 @@ func resetAll(cmd *cobra.Command, args []string) {
// XXX: this is totally unsafe. // XXX: this is totally unsafe.
// it's only suitable for testnets. // it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) { func resetPrivValidator(cmd *cobra.Command, args []string) {
resetPrivValidatorFS(config.PrivValidatorFile(), logger) resetFilePV(config.PrivValidatorFile(), logger)
} }
func resetPrivValidatorFS(privValFile string, logger log.Logger) { func resetFilePV(privValFile string, logger log.Logger) {
// Get PrivValidator // Get PrivValidator
if _, err := os.Stat(privValFile); err == nil { if _, err := os.Stat(privValFile); err == nil {
privValidator := types.LoadPrivValidatorFS(privValFile) pv := pvm.LoadFilePV(privValFile)
privValidator.Reset() pv.Reset()
logger.Info("Reset PrivValidator", "file", privValFile) logger.Info("Reset PrivValidator", "file", privValFile)
} else { } else {
privValidator := types.GenPrivValidatorFS(privValFile) pv := pvm.GenFilePV(privValFile)
privValidator.Save() pv.Save()
logger.Info("Generated PrivValidator", "file", privValFile) logger.Info("Generated PrivValidator", "file", privValFile)
} }
} }


@ -6,6 +6,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
) )
// ShowNodeIDCmd dumps node's ID to the standard output. // ShowNodeIDCmd dumps node's ID to the standard output.
@ -16,10 +17,12 @@ var ShowNodeIDCmd = &cobra.Command{
} }
func showNodeID(cmd *cobra.Command, args []string) error { func showNodeID(cmd *cobra.Command, args []string) error {
nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil { if err != nil {
return err return err
} }
fmt.Println(nodeKey.ID()) fmt.Println(nodeKey.ID())
return nil return nil
} }


@ -2,11 +2,9 @@ package commands
import ( import (
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/go-wire/data" privval "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tendermint/types"
) )
// ShowValidatorCmd adds capabilities for showing the validator info. // ShowValidatorCmd adds capabilities for showing the validator info.
@ -17,7 +15,7 @@ var ShowValidatorCmd = &cobra.Command{
} }
func showValidator(cmd *cobra.Command, args []string) { func showValidator(cmd *cobra.Command, args []string) {
privValidator := types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()) privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile())
pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey) pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
fmt.Println(string(pubKeyJSONBytes)) fmt.Println(string(pubKeyJSONBytes))
} }


@ -2,59 +2,103 @@ package commands
import ( import (
"fmt" "fmt"
"net"
"os"
"path/filepath" "path/filepath"
"strings"
"time" "time"
"github.com/spf13/cobra" "github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config" cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
//flags
var ( var (
nValidators int nValidators int
dataDir string nNonValidators int
outputDir string
nodeDirPrefix string
populatePersistentPeers bool
hostnamePrefix string
startingIPAddress string
p2pPort int
)
const (
nodeDirPerm = 0755
) )
func init() { func init() {
TestnetFilesCmd.Flags().IntVar(&nValidators, "n", 4, TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
"Number of validators to initialize the testnet with") "Number of validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet", TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
"Number of non-validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
"Directory to store initialization data for the testnet") "Directory to store initialization data for the testnet")
TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node",
"Prefix the directory name for each node with (node results in node0, node1, ...)")
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
"Hostname prefix (node results in persistent peers list ID0@node0:46656, ID1@node1:46656, ...)")
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:46656, ID1@192.168.0.2:46656, ...)")
TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 46656,
"P2P Port")
} }
// TestnetFilesCmd allows initialisation of files for a // TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
// Tendermint testnet.
var TestnetFilesCmd = &cobra.Command{ var TestnetFilesCmd = &cobra.Command{
Use: "testnet", Use: "testnet",
Short: "Initialize files for a Tendermint testnet", Short: "Initialize files for a Tendermint testnet",
Run: testnetFiles, RunE: testnetFiles,
} }
func testnetFiles(cmd *cobra.Command, args []string) { func testnetFiles(cmd *cobra.Command, args []string) error {
config := cfg.DefaultConfig()
genVals := make([]types.GenesisValidator, nValidators) genVals := make([]types.GenesisValidator, nValidators)
defaultConfig := cfg.DefaultBaseConfig()
// Initialize core dir and priv_validator.json's
for i := 0; i < nValidators; i++ { for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i) nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
err := initMachCoreDirectory(dataDir, mach) nodeDir := filepath.Join(outputDir, nodeDirName)
config.SetRoot(nodeDir)
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
if err != nil { if err != nil {
cmn.Exit(err.Error()) _ = os.RemoveAll(outputDir)
return err
} }
// Read priv_validator.json to populate vals
privValFile := filepath.Join(dataDir, mach, defaultConfig.PrivValidator) initFilesWithConfig(config)
privVal := types.LoadPrivValidatorFS(privValFile)
pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
pv := pvm.LoadFilePV(pvFile)
genVals[i] = types.GenesisValidator{ genVals[i] = types.GenesisValidator{
PubKey: privVal.GetPubKey(), PubKey: pv.GetPubKey(),
Power: 1, Power: 1,
Name: mach, Name: nodeDirName,
} }
} }
for i := 0; i < nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
config.SetRoot(nodeDir)
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
if err != nil {
_ = os.RemoveAll(outputDir)
return err
}
initFilesWithConfig(config)
}
// Generate genesis doc from generated validators // Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{ genDoc := &types.GenesisDoc{
GenesisTime: time.Now(), GenesisTime: time.Now(),
@ -63,36 +107,65 @@ func testnetFiles(cmd *cobra.Command, args []string) {
} }
// Write genesis file. // Write genesis file.
for i := 0; i < nValidators; i++ { for i := 0; i < nValidators+nNonValidators; i++ {
mach := cmn.Fmt("mach%d", i) nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
if err := genDoc.SaveAs(filepath.Join(dataDir, mach, defaultConfig.Genesis)); err != nil { if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
panic(err) _ = os.RemoveAll(outputDir)
return err
} }
} }
fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators)) if populatePersistentPeers {
} err := populatePersistentPeersInConfigAndWriteIt(config)
if err != nil {
// Initialize per-machine core directory _ = os.RemoveAll(outputDir)
func initMachCoreDirectory(base, mach string) error { return err
// Create priv_validator.json file if not present }
defaultConfig := cfg.DefaultBaseConfig()
dir := filepath.Join(base, mach)
privValPath := filepath.Join(dir, defaultConfig.PrivValidator)
dir = filepath.Dir(privValPath)
err := cmn.EnsureDir(dir, 0700)
if err != nil {
return err
} }
ensurePrivValidator(privValPath)
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
return nil return nil
} }
func ensurePrivValidator(file string) { func hostnameOrIP(i int) string {
if cmn.FileExists(file) { if startingIPAddress != "" {
return ip := net.ParseIP(startingIPAddress)
ip = ip.To4()
if ip == nil {
fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
os.Exit(1)
}
for j := 0; j < i; j++ {
ip[3]++
}
return ip.String()
} }
privValidator := types.GenPrivValidatorFS(file)
privValidator.Save() return fmt.Sprintf("%s%d", hostnamePrefix, i)
}
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
persistentPeers := make([]string, nValidators+nNonValidators)
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return err
}
persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
}
persistentPeersList := strings.Join(persistentPeers, ",")
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
config.P2P.PersistentPeers = persistentPeersList
// overwrite default config
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
}
return nil
} }
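To make the persistent-peers wiring above easier to follow, here is a minimal, self-contained sketch (an illustration, not the command's code) of how a persistent_peers string is assembled from either a hostname prefix or a starting IP address. The node IDs below are placeholders; in the real command they come from each node's node_key.json via p2p.LoadNodeKey, and the result is written into each config.toml by populatePersistentPeersInConfigAndWriteIt.

package main

import (
	"fmt"
	"net"
	"strings"
)

// hostOrIP mirrors hostnameOrIP above: with a starting IP, node i gets
// startingIP+i; otherwise it gets "<prefix><i>".
func hostOrIP(i int, hostnamePrefix, startingIP string) (string, error) {
	if startingIP == "" {
		return fmt.Sprintf("%s%d", hostnamePrefix, i), nil
	}
	ip := net.ParseIP(startingIP).To4()
	if ip == nil {
		return "", fmt.Errorf("%v: non-IPv4 address", startingIP)
	}
	for j := 0; j < i; j++ {
		ip[3]++ // naive increment of the last octet, as in the command
	}
	return ip.String(), nil
}

func main() {
	// Hypothetical node IDs; the real ones are derived from node_key.json.
	ids := []string{"id0", "id1", "id2", "id3"}
	peers := make([]string, len(ids))
	for i, id := range ids {
		host, err := hostOrIP(i, "node", "192.168.0.1")
		if err != nil {
			panic(err)
		}
		peers[i] = fmt.Sprintf("%s@%s:%d", id, host, 46656) // ID@host:port
	}
	// The comma-separated list is what lands in config.toml as persistent_peers.
	fmt.Println(strings.Join(peers, ","))
}

With a starting IP of 192.168.0.1 this prints id0@192.168.0.1:46656,id1@192.168.0.2:46656,..., matching the format described in the flag help text; an invocation along the lines of tendermint testnet --v 4 --n 2 --o ./mytestnet --starting-ip-address 192.168.0.1 would produce the real directories and configs.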


@ -0,0 +1,12 @@
package commands
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}


@ -25,6 +25,7 @@ func main() {
cmd.ShowValidatorCmd, cmd.ShowValidatorCmd,
cmd.TestnetFilesCmd, cmd.TestnetFilesCmd,
cmd.ShowNodeIDCmd, cmd.ShowNodeIDCmd,
cmd.GenNodeKeyCmd,
cmd.VersionCmd) cmd.VersionCmd)
// NOTE: // NOTE:


@ -270,7 +270,7 @@ type P2PConfig struct {
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
// Maximum size of a message packet payload, in bytes // Maximum size of a message packet payload, in bytes
MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"` MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Rate at which packets can be sent, in bytes/second // Rate at which packets can be sent, in bytes/second
SendRate int64 `mapstructure:"send_rate"` SendRate int64 `mapstructure:"send_rate"`
@ -302,7 +302,7 @@ func DefaultP2PConfig() *P2PConfig {
AddrBookStrict: true, AddrBookStrict: true,
MaxNumPeers: 50, MaxNumPeers: 50,
FlushThrottleTimeout: 100, FlushThrottleTimeout: 100,
MaxMsgPacketPayloadSize: 1024, // 1 kB MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 512000, // 500 kB/s SendRate: 512000, // 500 kB/s
RecvRate: 512000, // 500 kB/s RecvRate: 512000, // 500 kB/s
PexReactor: true, PexReactor: true,


@ -37,16 +37,21 @@ func EnsureRoot(rootDir string) {
// Write default config file if missing. // Write default config file if missing.
if !cmn.FileExists(configFilePath) { if !cmn.FileExists(configFilePath) {
writeConfigFile(configFilePath) writeDefaultConfigFile(configFilePath)
} }
} }
// XXX: this func should probably be called by cmd/tendermint/commands/init.go // XXX: this func should probably be called by cmd/tendermint/commands/init.go
// alongside the writing of the genesis.json and priv_validator.json // alongside the writing of the genesis.json and priv_validator.json
func writeConfigFile(configFilePath string) { func writeDefaultConfigFile(configFilePath string) {
WriteConfigFile(configFilePath, DefaultConfig())
}
// WriteConfigFile renders config using the template and writes it to configFilePath.
func WriteConfigFile(configFilePath string, config *Config) {
var buffer bytes.Buffer var buffer bytes.Buffer
if err := configTemplate.Execute(&buffer, DefaultConfig()); err != nil { if err := configTemplate.Execute(&buffer, config); err != nil {
panic(err) panic(err)
} }
@ -124,11 +129,11 @@ unsafe = {{ .RPC.Unsafe }}
laddr = "{{ .P2P.ListenAddress }}" laddr = "{{ .P2P.ListenAddress }}"
# Comma separated list of seed nodes to connect to # Comma separated list of seed nodes to connect to
seeds = "" seeds = "{{ .P2P.Seeds }}"
# Comma separated list of nodes to keep persistent connections to # Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised # Do not add private peers to this list if you don't want them advertised
persistent_peers = "" persistent_peers = "{{ .P2P.PersistentPeers }}"
# Path to address book # Path to address book
addr_book_file = "{{ .P2P.AddrBook }}" addr_book_file = "{{ .P2P.AddrBook }}"
@ -143,7 +148,7 @@ flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
max_num_peers = {{ .P2P.MaxNumPeers }} max_num_peers = {{ .P2P.MaxNumPeers }}
# Maximum size of a message packet payload, in bytes # Maximum size of a message packet payload, in bytes
max_msg_packet_payload_size = {{ .P2P.MaxMsgPacketPayloadSize }} max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
# Rate at which packets can be sent, in bytes/second # Rate at which packets can be sent, in bytes/second
send_rate = {{ .P2P.SendRate }} send_rate = {{ .P2P.SendRate }}
@ -262,7 +267,7 @@ func ResetTestRoot(testName string) *Config {
// Write default config file if missing. // Write default config file if missing.
if !cmn.FileExists(configFilePath) { if !cmn.FileExists(configFilePath) {
writeConfigFile(configFilePath) writeDefaultConfigFile(configFilePath)
} }
if !cmn.FileExists(genesisFilePath) { if !cmn.FileExists(genesisFilePath) {
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
@ -280,8 +285,8 @@ var testGenesis = `{
"validators": [ "validators": [
{ {
"pub_key": { "pub_key": {
"type": "ed25519", "type": "AC26791624DE60",
"data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8" "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
}, },
"power": 10, "power": 10,
"name": "" "name": ""
@ -291,14 +296,14 @@ var testGenesis = `{
}` }`
var testPrivValidator = `{ var testPrivValidator = `{
"address": "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456", "address": "849CB2C877F87A20925F35D00AE6688342D25B47",
"pub_key": { "pub_key": {
"type": "ed25519", "type": "AC26791624DE60",
"data": "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8" "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
}, },
"priv_key": { "priv_key": {
"type": "ed25519", "type": "954568A3288910",
"data": "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8" "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
}, },
"last_height": 0, "last_height": 0,
"last_round": 0, "last_round": 0,


@ -7,7 +7,6 @@ import (
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@ -48,7 +47,9 @@ func TestByzantine(t *testing.T) {
for i := 0; i < N; i++ { for i := 0; i < N; i++ {
// make first val byzantine // make first val byzantine
if i == 0 { if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator) // NOTE: Now, test validators are MockPV, which by default doesn't
// do any safety checks.
css[i].privValidator.(*types.MockPV).DisableChecks()
css[i].decideProposal = func(j int) func(int64, int) { css[i].decideProposal = func(j int) func(int64, int) {
return func(height int64, round int) { return func(height int64, round int) {
byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
@ -203,7 +204,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal // proposal
msg := &ProposalMessage{Proposal: proposal} msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
// parts // parts
for i := 0; i < parts.Total(); i++ { for i := 0; i < parts.Total(); i++ {
@ -213,7 +214,7 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
Round: round, // This tells peer that this part applies to us. Round: round, // This tells peer that this part applies to us.
Part: part, Part: part,
} }
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
} }
// votes // votes
@ -222,8 +223,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
cs.mtx.Unlock() cs.mtx.Unlock()
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}}) peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote}))
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}}) peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit}))
} }
//---------------------------------------- //----------------------------------------
@ -264,47 +265,3 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes) br.reactor.Receive(chID, peer, msgBytes)
} }
//----------------------------------------
// byzantine privValidator
type ByzantinePrivValidator struct {
types.Signer
pv types.PrivValidator
}
// Return a priv validator that will sign anything
func NewByzantinePrivValidator(pv types.PrivValidator) *ByzantinePrivValidator {
return &ByzantinePrivValidator{
Signer: pv.(*types.PrivValidatorFS).Signer,
pv: pv,
}
}
func (privVal *ByzantinePrivValidator) GetAddress() types.Address {
return privVal.pv.GetAddress()
}
func (privVal *ByzantinePrivValidator) GetPubKey() crypto.PubKey {
return privVal.pv.GetPubKey()
}
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) (err error) {
vote.Signature, err = privVal.Sign(vote.SignBytes(chainID))
return err
}
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
proposal.Signature, _ = privVal.Sign(proposal.SignBytes(chainID))
return nil
}
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
heartbeat.Signature, _ = privVal.Sign(heartbeat.SignBytes(chainID))
return nil
}
func (privVal *ByzantinePrivValidator) String() string {
return cmn.Fmt("PrivValidator{%X}", privVal.GetAddress())
}


@ -21,6 +21,7 @@ import (
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -222,7 +223,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh := make(chan interface{}) voteCh := make(chan interface{})
go func() { go func() {
for v := range voteCh0 { for v := range voteCh0 {
vote := v.(types.TMEventData).Unwrap().(types.EventDataVote) vote := v.(types.EventDataVote)
// we only fire for our own votes // we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) { if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v voteCh <- v
@ -277,10 +278,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
return cs return cs
} }
func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS { func loadPrivValidator(config *cfg.Config) *pvm.FilePV {
privValidatorFile := config.PrivValidatorFile() privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700) ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile) privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
privValidator.Reset() privValidator.Reset()
return privValidator return privValidator
} }
@ -378,7 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
privVal = privVals[i] privVal = privVals[i]
} else { } else {
_, tempFilePath := cmn.Tempfile("priv_validator_") _, tempFilePath := cmn.Tempfile("priv_validator_")
privVal = types.GenPrivValidatorFS(tempFilePath) privVal = pvm.GenFilePV(tempFilePath)
} }
app := appFunc() app := appFunc()
@ -394,7 +395,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
for i, s := range switches { for i, s := range switches {
if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) { if peer.NodeInfo().ID == s.NodeInfo().ID {
return i return i
} }
} }
@ -405,9 +406,9 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
//------------------------------------------------------------------------------- //-------------------------------------------------------------------------------
// genesis // genesis
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) { func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators) validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]*types.PrivValidatorFS, numValidators) privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ { for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower) val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{ validators[i] = types.GenesisValidator{
@ -425,7 +426,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
}, privValidators }, privValidators
} }
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []*types.PrivValidatorFS) { func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0, _ := sm.MakeGenesisState(genDoc) s0, _ := sm.MakeGenesisState(genDoc)
db := dbm.NewMemDB() db := dbm.NewMemDB()


@ -108,7 +108,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
ticker := time.NewTicker(time.Second * 30) ticker := time.NewTicker(time.Second * 30)
select { select {
case b := <-newBlockCh: case b := <-newBlockCh:
evt := b.(types.TMEventData).Unwrap().(types.EventDataNewBlock) evt := b.(types.EventDataNewBlock)
nTxs += int(evt.Block.Header.NumTxs) nTxs += int(evt.Block.Header.NumTxs)
case <-ticker.C: case <-ticker.C:
panic("Timed out waiting to commit blocks with transactions") panic("Timed out waiting to commit blocks with transactions")


@ -1,7 +1,6 @@
package consensus package consensus
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"reflect" "reflect"
@ -10,7 +9,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -26,7 +25,7 @@ const (
VoteChannel = byte(0x22) VoteChannel = byte(0x22)
VoteSetBitsChannel = byte(0x23) VoteSetBitsChannel = byte(0x23)
maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes. maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
blocksToContributeToBecomeGoodPeer = 10000 blocksToContributeToBecomeGoodPeer = 10000
) )
@ -110,27 +109,31 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
// TODO optimize // TODO optimize
return []*p2p.ChannelDescriptor{ return []*p2p.ChannelDescriptor{
{ {
ID: StateChannel, ID: StateChannel,
Priority: 5, Priority: 5,
SendQueueCapacity: 100, SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
}, },
{ {
ID: DataChannel, // maybe split between gossiping current block and catchup stuff ID: DataChannel, // maybe split between gossiping current block and catchup stuff
Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
SendQueueCapacity: 100, SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096, RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
}, },
{ {
ID: VoteChannel, ID: VoteChannel,
Priority: 5, Priority: 5,
SendQueueCapacity: 100, SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100, RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
}, },
{ {
ID: VoteSetBitsChannel, ID: VoteSetBitsChannel,
Priority: 1, Priority: 1,
SendQueueCapacity: 2, SendQueueCapacity: 2,
RecvBufferCapacity: 1024, RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
}, },
} }
} }
@ -178,7 +181,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
return return
} }
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
conR.Switch.StopPeerForError(src, err) conR.Switch.StopPeerForError(src, err)
@ -224,13 +227,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
conR.Logger.Error("Bad VoteSetBitsMessage field Type") conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return return
} }
src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{ src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
Height: msg.Height, Height: msg.Height,
Round: msg.Round, Round: msg.Round,
Type: msg.Type, Type: msg.Type,
BlockID: msg.BlockID, BlockID: msg.BlockID,
Votes: ourVotes, Votes: ourVotes,
}}) }))
case *ProposalHeartbeatMessage: case *ProposalHeartbeatMessage:
hb := msg.Heartbeat hb := msg.Heartbeat
conR.Logger.Debug("Received proposal heartbeat message", conR.Logger.Debug("Received proposal heartbeat message",
@ -377,17 +380,17 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
select { select {
case data, ok = <-stepsCh: case data, ok = <-stepsCh:
if ok { // a receive from a closed channel returns the zero value immediately if ok { // a receive from a closed channel returns the zero value immediately
edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState) edrs := data.(types.EventDataRoundState)
conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState)) conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
} }
case data, ok = <-votesCh: case data, ok = <-votesCh:
if ok { if ok {
edv := data.(types.TMEventData).Unwrap().(types.EventDataVote) edv := data.(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote) conR.broadcastHasVoteMessage(edv.Vote)
} }
case data, ok = <-heartbeatsCh: case data, ok = <-heartbeatsCh:
if ok { if ok {
edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat) edph := data.(types.EventDataProposalHeartbeat)
conR.broadcastProposalHeartbeatMessage(edph) conR.broadcastProposalHeartbeatMessage(edph)
} }
case <-conR.Quit(): case <-conR.Quit():
@ -409,16 +412,16 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.
conR.Logger.Debug("Broadcasting proposal heartbeat message", conR.Logger.Debug("Broadcasting proposal heartbeat message",
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence) "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
msg := &ProposalHeartbeatMessage{hb} msg := &ProposalHeartbeatMessage{hb}
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
} }
func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) { func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
nrsMsg, csMsg := makeRoundStepMessages(rs) nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil { if nrsMsg != nil {
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{nrsMsg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
} }
if csMsg != nil { if csMsg != nil {
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{csMsg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
} }
} }
@ -430,7 +433,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
Type: vote.Type, Type: vote.Type,
Index: vote.ValidatorIndex, Index: vote.ValidatorIndex,
} }
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
/* /*
// TODO: Make this broadcast more selective. // TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() { for _, peer := range conR.Switch.Peers().List() {
@ -470,10 +473,10 @@ func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
rs := conR.conS.GetRoundState() rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs) nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil { if nrsMsg != nil {
peer.Send(StateChannel, struct{ ConsensusMessage }{nrsMsg}) peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
} }
if csMsg != nil { if csMsg != nil {
peer.Send(StateChannel, struct{ ConsensusMessage }{csMsg}) peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
} }
} }
@ -500,7 +503,7 @@ OUTER_LOOP:
Part: part, Part: part,
} }
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} }
continue OUTER_LOOP continue OUTER_LOOP
@ -544,7 +547,7 @@ OUTER_LOOP:
{ {
msg := &ProposalMessage{Proposal: rs.Proposal} msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposal(rs.Proposal) ps.SetHasProposal(rs.Proposal)
} }
} }
@ -559,7 +562,7 @@ OUTER_LOOP:
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
} }
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
} }
continue OUTER_LOOP continue OUTER_LOOP
} }
@ -602,7 +605,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
Part: part, Part: part,
} }
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else { } else {
logger.Debug("Sending block part for catchup failed") logger.Debug("Sending block part for catchup failed")
@ -739,12 +742,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if rs.Height == prs.Height { if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: prs.Round, Round: prs.Round,
Type: types.VoteTypePrevote, Type: types.VoteTypePrevote,
BlockID: maj23, BlockID: maj23,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@ -756,12 +759,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if rs.Height == prs.Height { if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: prs.Round, Round: prs.Round,
Type: types.VoteTypePrecommit, Type: types.VoteTypePrecommit,
BlockID: maj23, BlockID: maj23,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@ -773,12 +776,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: prs.ProposalPOLRound, Round: prs.ProposalPOLRound,
Type: types.VoteTypePrevote, Type: types.VoteTypePrevote,
BlockID: maj23, BlockID: maj23,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@ -792,12 +795,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() { if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
commit := conR.conS.LoadCommit(prs.Height) commit := conR.conS.LoadCommit(prs.Height)
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: commit.Round(), Round: commit.Round(),
Type: types.VoteTypePrecommit, Type: types.VoteTypePrecommit,
BlockID: commit.BlockID, BlockID: commit.BlockID,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@ -835,8 +838,8 @@ var (
ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime") ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime")
) )
// PeerState contains the known state of a peer, including its connection // PeerState contains the known state of a peer, including its connection and
// and threadsafe access to its PeerRoundState. // threadsafe access to its PeerRoundState.
type PeerState struct { type PeerState struct {
Peer p2p.Peer Peer p2p.Peer
logger log.Logger logger log.Logger
@ -875,12 +878,14 @@ func NewPeerState(peer p2p.Peer) *PeerState {
} }
} }
// SetLogger sets a logger on the peer state and returns the peer state
// itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState { func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
ps.logger = logger ps.logger = logger
return ps return ps
} }
// GetRoundState returns an atomic snapshot of the PeerRoundState. // GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState. // There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
ps.mtx.Lock() ps.mtx.Lock()
@ -890,6 +895,14 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
return &prs return &prs
} }
// GetRoundStateJSON returns a JSON representation of the PeerRoundState, marshalled using go-amino.
func (ps *PeerState) GetRoundStateJSON() ([]byte, error) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return cdc.MarshalJSON(ps.PeerRoundState)
}
// GetHeight returns an atomic snapshot of the PeerRoundState's height // GetHeight returns an atomic snapshot of the PeerRoundState's height
// used by the mempool to ensure peers are caught up before broadcasting new txs // used by the mempool to ensure peers are caught up before broadcasting new txs
func (ps *PeerState) GetHeight() int64 { func (ps *PeerState) GetHeight() int64 {
@ -948,7 +961,7 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok { if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote} msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
return ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg}) return ps.Peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
} }
return false return false
} }
@ -1052,7 +1065,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
} }
} }
// EnsureVoteVitArrays ensures the bit-arrays have been allocated for tracking // EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
// what votes this peer has received. // what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches // NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height. // what the node sees as the number of validators for height.
@ -1292,45 +1305,30 @@ func (ps *PeerState) StringIndented(indent string) string {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeNewRoundStep = byte(0x01)
msgTypeCommitStep = byte(0x02)
msgTypeProposal = byte(0x11)
msgTypeProposalPOL = byte(0x12)
msgTypeBlockPart = byte(0x13) // both block & POL
msgTypeVote = byte(0x14)
msgTypeHasVote = byte(0x15)
msgTypeVoteSetMaj23 = byte(0x16)
msgTypeVoteSetBits = byte(0x17)
msgTypeProposalHeartbeat = byte(0x20)
)
// ConsensusMessage is a message that can be sent and received on the ConsensusReactor // ConsensusMessage is a message that can be sent and received on the ConsensusReactor
type ConsensusMessage interface{} type ConsensusMessage interface{}
var _ = wire.RegisterInterface( func RegisterConsensusMessages(cdc *amino.Codec) {
struct{ ConsensusMessage }{}, cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
wire.ConcreteType{&NewRoundStepMessage{}, msgTypeNewRoundStep}, cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
wire.ConcreteType{&CommitStepMessage{}, msgTypeCommitStep}, cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil)
wire.ConcreteType{&ProposalMessage{}, msgTypeProposal}, cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
wire.ConcreteType{&ProposalPOLMessage{}, msgTypeProposalPOL}, cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart}, cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
wire.ConcreteType{&VoteMessage{}, msgTypeVote}, cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil)
wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote}, cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)
wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23}, cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil)
wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits}, cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
wire.ConcreteType{&ProposalHeartbeatMessage{}, msgTypeProposalHeartbeat}, cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
) }
// DecodeMessage decodes the given bytes into a ConsensusMessage. // DecodeMessage decodes the given bytes into a ConsensusMessage.
// TODO: check for unnecessary extra bytes at the end. func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) { if len(bz) > maxMsgSize {
msgType = bz[0] return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
n := new(int) len(bz), maxMsgSize)
r := bytes.NewReader(bz) }
msgI := wire.ReadBinary(struct{ ConsensusMessage }{}, r, maxConsensusMessageSize, n, &err) err = cdc.UnmarshalBinaryBare(bz, &msg)
msg = msgI.(struct{ ConsensusMessage }).ConsensusMessage
return return
} }
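As a rough, standalone illustration of the go-wire to go-amino switch in this reactor (placeholder type names, not the reactor's own types): register an interface and a concrete message on a codec, marshal with the bare binary encoding used on the wire, and decode it back with a size check like the one in DecodeMessage.

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// Msg stands in for ConsensusMessage; Ping is a hypothetical concrete type.
type Msg interface{}

type Ping struct {
	Height int64
	Round  int
}

const maxMsgSize = 1048576 // mirrors the reactor's 1 MB cap

var cdc = amino.NewCodec()

func init() {
	cdc.RegisterInterface((*Msg)(nil), nil)
	cdc.RegisterConcrete(&Ping{}, "example/Ping", nil)
}

func decode(bz []byte) (Msg, error) {
	if len(bz) > maxMsgSize {
		return nil, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	var msg Msg
	err := cdc.UnmarshalBinaryBare(bz, &msg)
	return msg, err
}

func main() {
	bz := cdc.MustMarshalBinaryBare(&Ping{Height: 10, Round: 1})
	msg, err := decode(bz)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded: %#v\n", msg)
}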


@ -11,7 +11,6 @@ import (
"time" "time"
"github.com/tendermint/abci/example/kvstore" "github.com/tendermint/abci/example/kvstore"
wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -149,30 +148,30 @@ func TestReactorRecordsBlockParts(t *testing.T) {
Round: 0, Round: 0,
Part: parts.GetPart(0), Part: parts.GetPart(0),
} }
bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{msg}) bz, err := cdc.MarshalBinaryBare(msg)
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz) reactor.Receive(DataChannel, peer, bz)
assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1") require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
// 2) block part with the same height, but different round // 2) block part with the same height, but different round
msg.Round = 1 msg.Round = 1
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg}) bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz) reactor.Receive(DataChannel, peer, bz)
assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
// 3) block part from earlier height // 3) block part from earlier height
msg.Height = 1 msg.Height = 1
msg.Round = 0 msg.Round = 0
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg}) bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz) reactor.Receive(DataChannel, peer, bz)
assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
} }
// Test we record votes from other peers // Test we record votes from other peers
@ -204,7 +203,7 @@ func TestReactorRecordsVotes(t *testing.T) {
Type: types.VoteTypePrevote, Type: types.VoteTypePrevote,
BlockID: types.BlockID{}, BlockID: types.BlockID{},
} }
bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}}) bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz) reactor.Receive(VoteChannel, peer, bz)
@ -213,7 +212,7 @@ func TestReactorRecordsVotes(t *testing.T) {
// 2) vote with the same height, but different round // 2) vote with the same height, but different round
vote.Round = 1 vote.Round = 1
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}}) bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz) reactor.Receive(VoteChannel, peer, bz)
@ -223,7 +222,7 @@ func TestReactorRecordsVotes(t *testing.T) {
vote.Height = 1 vote.Height = 1
vote.Round = 0 vote.Round = 0
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}}) bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz) reactor.Receive(VoteChannel, peer, bz)
@ -410,7 +409,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
if !ok { if !ok {
return return
} }
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block newBlock := newBlockI.(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height) css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals) err := validateBlock(newBlock, activeVals)
assert.Nil(t, err) assert.Nil(t, err)
@ -431,7 +430,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
if !ok { if !ok {
return return
} }
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block newBlock := newBlockI.(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height) css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals) err := validateBlock(newBlock, activeVals)
assert.Nil(t, err) assert.Nil(t, err)
@ -463,7 +462,7 @@ func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals m
if !ok { if !ok {
return return
} }
newBlock = newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block newBlock = newBlockI.(types.EventDataNewBlock).Block
if newBlock.LastCommit.Size() == len(updatedVals) { if newBlock.LastCommit.Size() == len(updatedVals) {
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
break LOOP break LOOP


@ -17,8 +17,7 @@ import (
"github.com/tendermint/abci/example/kvstore" "github.com/tendermint/abci/example/kvstore"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
crypto "github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
auto "github.com/tendermint/tmlibs/autofile" auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
@ -27,6 +26,7 @@ import (
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@ -60,7 +60,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
bytes, _ := ioutil.ReadFile(cs.config.WalFile()) bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes) // fmt.Printf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%s\n", bytes) t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start() err := cs.Start()
require.NoError(t, err) require.NoError(t, err)
@ -325,7 +325,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
walFile := tempWALWithData(walBody) walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile) config.Consensus.SetWalFile(walFile)
privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile()) privVal := pvm.LoadFilePV(config.PrivValidatorFile())
wal, err := NewWAL(walFile, false) wal, err := NewWAL(walFile, false)
if err != nil { if err != nil {
@ -519,8 +519,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case EndHeightMessage: case EndHeightMessage:
// if its not the first one, we have a full block // if its not the first one, we have a full block
if thisBlockParts != nil { if thisBlockParts != nil {
var n int var block = new(types.Block)
block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block) _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -552,8 +552,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
} }
} }
// grab the last block too // grab the last block too
var n int var block = new(types.Block)
block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block) _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil { if err != nil {
panic(err) panic(err)
} }


@ -10,8 +10,6 @@ import (
"time" "time"
fail "github.com/ebuchman/fail-test" fail "github.com/ebuchman/fail-test"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -170,18 +168,23 @@ func (cs *ConsensusState) GetState() sm.State {
return cs.state.Copy() return cs.state.Copy()
} }
// GetRoundState returns a copy of the internal consensus state. // GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
cs.mtx.Lock() cs.mtx.Lock()
defer cs.mtx.Unlock() defer cs.mtx.Unlock()
return cs.getRoundState()
}
func (cs *ConsensusState) getRoundState() *cstypes.RoundState {
rs := cs.RoundState // copy rs := cs.RoundState // copy
return &rs return &rs
} }
// GetRoundStateJSON returns a JSON representation of the RoundState, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
return cdc.MarshalJSON(cs.RoundState)
}
// GetValidators returns a copy of the current validators. // GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) { func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
cs.mtx.Lock() cs.mtx.Lock()
@ -776,7 +779,7 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
// if not a validator, we're done // if not a validator, we're done
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
cs.Logger.Debug("This node is not a validator") cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
return return
} }
cs.Logger.Debug("This node is a validator") cs.Logger.Debug("This node is a validator")
@ -1297,10 +1300,10 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
} }
if added && cs.ProposalBlockParts.IsComplete() { if added && cs.ProposalBlockParts.IsComplete() {
// Added and completed! // Added and completed!
var n int _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
var err error if err != nil {
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(), return true, err
cs.state.ConsensusParams.BlockSize.MaxBytes, &n, &err).(*types.Block) }
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() { if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
@ -1310,7 +1313,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
// If we're waiting on the proposal block... // If we're waiting on the proposal block...
cs.tryFinalizeCommit(height) cs.tryFinalizeCommit(height)
} }
return true, err return true, nil
} }
return added, nil return added, nil
} }


@ -261,7 +261,7 @@ func TestStateFullRound1(t *testing.T) {
// grab proposal // grab proposal
re := <-propCh re := <-propCh
propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
<-voteCh // wait for prevote <-voteCh // wait for prevote
validatePrevote(t, cs, round, vss[0], propBlockHash) validatePrevote(t, cs, round, vss[0], propBlockHash)
@ -356,7 +356,7 @@ func TestStateLockNoPOL(t *testing.T) {
cs1.startRoutines(0) cs1.startRoutines(0)
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash() theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote <-voteCh // prevote
@ -396,7 +396,7 @@ func TestStateLockNoPOL(t *testing.T) {
// now we're on a new round and not the proposer, so wait for timeout // now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh re = <-timeoutProposeCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.ProposalBlock != nil { if rs.ProposalBlock != nil {
panic("Expected proposal block to be nil") panic("Expected proposal block to be nil")
@ -409,7 +409,7 @@ func TestStateLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())
// add a conflicting prevote from the other validator // add a conflicting prevote from the other validator
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh <-voteCh
// now we're going to enter prevote again, but with invalid args // now we're going to enter prevote again, but with invalid args
@ -424,7 +424,7 @@ func TestStateLockNoPOL(t *testing.T) {
// add conflicting precommit from vs2 // add conflicting precommit from vs2
// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh <-voteCh
// (note we're entering precommit for a second time this round, but with invalid args // (note we're entering precommit for a second time this round, but with invalid args
@ -440,7 +440,7 @@ func TestStateLockNoPOL(t *testing.T) {
incrementRound(vs2) incrementRound(vs2)
re = <-proposalCh re = <-proposalCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
// now we're on a new round and are the proposer // now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
@ -529,7 +529,7 @@ func TestStateLockPOLRelock(t *testing.T) {
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash() theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote <-voteCh // prevote
@ -605,9 +605,9 @@ func TestStateLockPOLRelock(t *testing.T) {
discardFromChan(voteCh, 2) discardFromChan(voteCh, 2)
be := <-newBlockCh be := <-newBlockCh
b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader) b := be.(types.EventDataNewBlockHeader)
re = <-newRoundCh re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 { if rs.Height != 2 {
panic("Expected height to increment") panic("Expected height to increment")
} }
@ -643,7 +643,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
startTestRound(cs1, cs1.Height, 0) startTestRound(cs1, cs1.Height, 0)
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash() theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote <-voteCh // prevote
@ -669,7 +669,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
// timeout to new round // timeout to new round
re = <-timeoutWaitCh re = <-timeoutWaitCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
lockedBlockHash := rs.LockedBlock.Hash() lockedBlockHash := rs.LockedBlock.Hash()
//XXX: this isnt guaranteed to get there before the timeoutPropose ... //XXX: this isnt guaranteed to get there before the timeoutPropose ...
@ -731,7 +731,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0) startTestRound(cs1, cs1.Height, 0)
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock propBlock := rs.ProposalBlock
<-voteCh // prevote <-voteCh // prevote
@ -781,7 +781,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
re = <-proposalCh re = <-proposalCh
} }
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.LockedBlock != nil { if rs.LockedBlock != nil {
panic("we should not be locked!") panic("we should not be locked!")
@ -1033,7 +1033,7 @@ func TestStateHalt1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0) startTestRound(cs1, cs1.Height, 0)
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet(partSize) propBlockParts := propBlock.MakePartSet(partSize)
@ -1056,7 +1056,7 @@ func TestStateHalt1(t *testing.T) {
// timeout to new round // timeout to new round
<-timeoutWaitCh <-timeoutWaitCh
re = <-newRoundCh re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
t.Log("### ONTO ROUND 1") t.Log("### ONTO ROUND 1")
/*Round2 /*Round2
@ -1074,7 +1074,7 @@ func TestStateHalt1(t *testing.T) {
// receiving that precommit should take us straight to commit // receiving that precommit should take us straight to commit
<-newBlockCh <-newBlockCh
re = <-newRoundCh re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 { if rs.Height != 2 {
panic("expected height to increment") panic("expected height to increment")


@ -48,7 +48,7 @@ func TestPeerCatchupRounds(t *testing.T) {
} }
func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote { func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote {
privVal := privVals[valIndex] privVal := privVals[valIndex]
vote := &types.Vote{ vote := &types.Vote{
ValidatorAddress: privVal.GetAddress(), ValidatorAddress: privVal.GetAddress(),


@ -52,9 +52,6 @@ func (rs RoundStepType) String() string {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// RoundState defines the internal consensus state. // RoundState defines the internal consensus state.
// It is Immutable when returned from ConsensusState.GetRoundState()
// TODO: Actually, only the top pointer is copied,
// so access to field pointers is still racey
// NOTE: Not thread safe. Should only be manipulated by functions downstream // NOTE: Not thread safe. Should only be manipulated by functions downstream
// of the cs.receiveRoutine // of the cs.receiveRoutine
type RoundState struct { type RoundState struct {


@ -1,7 +1,6 @@
package consensus package consensus
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
@ -11,7 +10,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
wire "github.com/tendermint/go-wire" "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tmlibs/autofile" auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@ -38,13 +37,13 @@ type EndHeightMessage struct {
type WALMessage interface{} type WALMessage interface{}
var _ = wire.RegisterInterface( func RegisterWALMessages(cdc *amino.Codec) {
struct{ WALMessage }{}, cdc.RegisterInterface((*WALMessage)(nil), nil)
wire.ConcreteType{types.EventDataRoundState{}, 0x01}, cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil)
wire.ConcreteType{msgInfo{}, 0x02}, cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil)
wire.ConcreteType{timeoutInfo{}, 0x03}, cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil)
wire.ConcreteType{EndHeightMessage{}, 0x04}, cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil)
) }
//-------------------------------------------------------- //--------------------------------------------------------
// Simple write-ahead logger // Simple write-ahead logger
@ -193,7 +192,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
// A WALEncoder writes custom-encoded WAL messages to an output stream. // A WALEncoder writes custom-encoded WAL messages to an output stream.
// //
// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-wire encoded) // Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded)
type WALEncoder struct { type WALEncoder struct {
wr io.Writer wr io.Writer
} }
@ -205,7 +204,7 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
// Encode writes the custom encoding of v to the stream. // Encode writes the custom encoding of v to the stream.
func (enc *WALEncoder) Encode(v *TimedWALMessage) error { func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
data := wire.BinaryBytes(v) data := cdc.MustMarshalBinaryBare(v)
crc := crc32.Checksum(data, crc32c) crc := crc32.Checksum(data, crc32c)
length := uint32(len(data)) length := uint32(len(data))
@ -298,9 +297,8 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)} return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
} }
var nn int var res = new(TimedWALMessage) // nolint: gosimple
var res *TimedWALMessage // nolint: gosimple err = cdc.UnmarshalBinaryBare(data, res)
res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage)
if err != nil { if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
} }
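For reference, the framing described in the comment above (4-byte CRC32C checksum, 4-byte length, then the amino-encoded value) can be sketched roughly as below. This is an illustrative reconstruction, not the code from this diff; in particular the big-endian byte order is an assumption.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// crc32c mirrors the Castagnoli table used by the WAL encoder.
var crc32c = crc32.MakeTable(crc32.Castagnoli)

// frame prepends the 4-byte CRC and 4-byte length to an already-encoded payload.
func frame(payload []byte) []byte {
	out := make([]byte, 8+len(payload))
	binary.BigEndian.PutUint32(out[0:4], crc32.Checksum(payload, crc32c))
	binary.BigEndian.PutUint32(out[4:8], uint32(len(payload)))
	copy(out[8:], payload)
	return out
}

func main() {
	// In the real encoder the payload is cdc.MustMarshalBinaryBare(v).
	fmt.Printf("%x\n", frame([]byte("amino-encoded message")))
}
```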

View File

@ -4,7 +4,6 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"fmt" "fmt"
"math/rand"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -17,6 +16,7 @@ import (
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
auto "github.com/tendermint/tmlibs/autofile" auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/db"
@ -40,7 +40,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
// NOTE: we can't import node package because of circular dependency // NOTE: we can't import node package because of circular dependency
privValidatorFile := config.PrivValidatorFile() privValidatorFile := config.PrivValidatorFile()
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile) privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to read genesis file") return nil, errors.Wrap(err, "failed to read genesis file")
@ -116,7 +116,7 @@ func makePathname() string {
func randPort() int { func randPort() int {
// returns between base and base + spread // returns between base and base + spread
base, spread := 20000, 20000 base, spread := 20000, 20000
return base + rand.Intn(spread) return base + cmn.RandIntn(spread)
} }
func makeAddrs() (string, string, string) { func makeAddrs() (string, string, string) {

View File

@ -3,11 +3,10 @@ package consensus
import ( import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"sync" // "sync"
"testing" "testing"
"time" "time"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/consensus/types"
tmtypes "github.com/tendermint/tendermint/types" tmtypes "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@ -36,7 +35,7 @@ func TestWALEncoderDecoder(t *testing.T) {
decoded, err := dec.Decode() decoded, err := dec.Decode()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, msg.Time.Truncate(time.Millisecond), decoded.Time) assert.Equal(t, msg.Time.UTC(), decoded.Time)
assert.Equal(t, msg.Msg, decoded.Msg) assert.Equal(t, msg.Msg, decoded.Msg)
} }
} }
@ -68,6 +67,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
} }
/*
var initOnce sync.Once var initOnce sync.Once
func registerInterfacesOnce() { func registerInterfacesOnce() {
@ -78,6 +78,7 @@ func registerInterfacesOnce() {
) )
}) })
} }
*/
func nBytes(n int) []byte { func nBytes(n int) []byte {
buf := make([]byte, n) buf := make([]byte, n)
@ -86,7 +87,7 @@ func nBytes(n int) []byte {
} }
func benchmarkWalDecode(b *testing.B, n int) { func benchmarkWalDecode(b *testing.B, n int) {
registerInterfacesOnce() // registerInterfacesOnce()
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
enc := NewWALEncoder(buf) enc := NewWALEncoder(buf)

consensus/wire.go Normal file (+14 lines)
View File

@ -0,0 +1,14 @@
package consensus
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
RegisterConsensusMessages(cdc)
RegisterWALMessages(cdc)
crypto.RegisterAmino(cdc)
}

docker-compose.yml Normal file (+68 lines)
View File

@ -0,0 +1,68 @@
version: '3'
services:
node0:
container_name: node0
image: "tendermint/localnode"
ports:
- "46656-46657:46656-46657"
environment:
- ID=0
- LOG=${LOG:-tendermint.log}
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.2
node1:
container_name: node1
image: "tendermint/localnode"
ports:
- "46659-46660:46656-46657"
environment:
- ID=1
- LOG=${LOG:-tendermint.log}
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.3
node2:
container_name: node2
image: "tendermint/localnode"
environment:
- ID=2
- LOG=${LOG:-tendermint.log}
ports:
- "46661-46662:46656-46657"
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.4
node3:
container_name: node3
image: "tendermint/localnode"
environment:
- ID=3
- LOG=${LOG:-tendermint.log}
ports:
- "46663-46664:46656-46657"
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.5
networks:
localnet:
driver: bridge
ipam:
driver: default
config:
-
subnet: 192.167.10.0/16

docker-compose/Makefile Normal file (+7 lines)
View File

@ -0,0 +1,7 @@
# Makefile for the "localnode" docker image.
all:
docker build --tag tendermint/localnode localnode
.PHONY: all

docker-compose/README.rst Normal file (+40 lines)
View File

@ -0,0 +1,40 @@
localnode
=========
It is assumed that you have already `setup docker <https://docs.docker.com/engine/installation/>`__.
Description
-----------
Image for local testnets.
Add the tendermint binary to the image by attaching it in a folder to the `/tendermint` mount point.
It assumes that the configuration was created by the `tendermint testnet` command and it is also attached to the `/tendermint` mount point.
Example:
This example builds a linux tendermint binary under the `build/` folder, creates tendermint configuration for a single-node validator and runs the node:
```
cd $GOPATH/src/github.com/tendermint/tendermint
#Build binary
make build-linux
#Create configuration
docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
#Run the node
docker run -v `pwd`/build:/tendermint tendermint/localnode
```
Logging
-------
Log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen.
Special binaries
----------------
If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume.
docker-compose.yml
==================
This file creates a 4-node network using the localnode image. The nodes of the network are exposed to the host machine on ports 46656-46657, 46659-46660, 46661-46662, 46663-46664 respectively.

View File

@ -0,0 +1,16 @@
FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>
RUN apk update && \
apk upgrade && \
apk --no-cache add curl jq file
VOLUME [ /tendermint ]
WORKDIR /tendermint
EXPOSE 46656 46657
ENTRYPOINT ["/usr/bin/wrapper.sh"]
CMD ["node", "--proxy_app dummy"]
STOPSIGNAL SIGTERM
COPY wrapper.sh /usr/bin/wrapper.sh

View File

@ -0,0 +1,33 @@
#!/usr/bin/env sh
##
## Input parameters
##
BINARY=/tendermint/${BINARY:-tendermint}
ID=${ID:-0}
LOG=${LOG:-tendermint.log}
##
## Assert linux binary
##
if ! [ -f "${BINARY}" ]; then
echo "The binary `basename ${BINARY}` cannot be found. Please add the binary to the shared folder. Please use the BINARY environment variable if the name of the binary is not 'tendermint' E.g.: -e BINARY=tendermint_my_test_version"
exit 1
fi
BINARY_CHECK="`file $BINARY | grep 'ELF 64-bit LSB executable, x86-64'`"
if [ -z "${BINARY_CHECK}" ]; then
echo "Binary needs to be OS linux, ARCH amd64"
exit 1
fi
##
## Run binary with all parameters
##
export TMHOME="/tendermint/node${ID}"
if [ -d "${TMHOME}/${LOG}" ]; then
"$BINARY" $@ | tee "${TMHOME}/${LOG}"
else
"$BINARY" $@
fi

View File

@ -1,128 +1,29 @@
# ADR 008: PrivValidator # ADR 008: SocketPV
## Context Tendermint nodes should support only two in-process PrivValidator
implementations:
The current PrivValidator is monolithic and isn't easily reuseable by alternative signers. - FilePV uses an unencrypted private key in a "priv_validator.json" file - no
configuration required (just `tendermint init`).
- SocketPV uses a socket to send signing requests to another process - user is
responsible for starting that process themselves.
For instance, see https://github.com/tendermint/tendermint/issues/673 The SocketPV address can be provided via flags at the command line - doing so
will cause Tendermint to ignore any "priv_validator.json" file and to listen on
the given address for incoming connections from an external priv_validator
process. It will halt any operation until at least one external process
successfully connected.
The goal is to have a clean PrivValidator interface like: The external priv_validator process will dial the address to connect to
Tendermint, and then Tendermint will send requests on the ensuing connection to
sign votes and proposals. Thus the external process initiates the connection,
but the Tendermint process makes all requests. In a later stage we're going to
support multiple validators for fault tolerance. To prevent double signing they
need to be synced, which is deferred to an external solution (see #1185).
``` In addition, Tendermint will provide implementations that can be run in that
type PrivValidator interface { external process. These include:
Address() data.Bytes
PubKey() crypto.PubKey
SignVote(chainID string, vote *types.Vote) error
SignProposal(chainID string, proposal *types.Proposal) error
SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error
}
```
It should also be easy to re-use the LastSignedInfo logic to avoid double signing.
## Decision
Tendermint node's should support only two in-process PrivValidator implementations:
- PrivValidatorUnencrypted uses an unencrypted private key in a "priv_validator.json" file - no configuration required (just `tendermint init`).
- PrivValidatorSocket uses a socket to send signing requests to another process - user is responsible for starting that process themselves.
The PrivValidatorSocket address can be provided via flags at the command line -
doing so will cause Tendermint to ignore any "priv_validator.json" file and to listen
on the given address for incoming connections from an external priv_validator process.
It will halt any operation until at least one external process succesfully
connected.
The external priv_validator process will dial the address to connect to Tendermint,
and then Tendermint will send requests on the ensuing connection to sign votes and proposals.
Thus the external process initiates the connection, but the Tendermint process makes all requests.
In a later stage we're going to support multiple validators for fault
tolerance. To prevent double signing they need to be synced, which is deferred
to an external solution (see #1185).
In addition, Tendermint will provide implementations that can be run in that external process.
These include:
- PrivValidatorEncrypted uses an encrypted private key persisted to disk - user must enter password to decrypt key when process is started.
- PrivValidatorLedger uses a Ledger Nano S to handle all signing.
What follows are descriptions of useful types
### Signer
```
type Signer interface {
Sign(msg []byte) (crypto.Signature, error)
}
```
Signer signs a message. It can also return an error.
### ValidatorID
ValidatorID is just the Address and PubKey
```
type ValidatorID struct {
Address data.Bytes `json:"address"`
PubKey crypto.PubKey `json:"pub_key"`
}
```
### LastSignedInfo
LastSignedInfo tracks the last thing we signed:
```
type LastSignedInfo struct {
Height int64 `json:"height"`
Round int `json:"round"`
Step int8 `json:"step"`
Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
SignBytes data.Bytes `json:"signbytes,omitempty"` // so we dont lose signatures
}
```
It exposes methods for signing votes and proposals using a `Signer`.
This allows it to easily be reused by developers implemented their own PrivValidator.
### PrivValidatorUnencrypted
```
type PrivValidatorUnencrypted struct {
ID types.ValidatorID `json:"id"`
PrivKey PrivKey `json:"priv_key"`
LastSignedInfo *LastSignedInfo `json:"last_signed_info"`
}
```
Has the same structure as currently, but broken up into sub structs.
Note the LastSignedInfo is mutated in place every time we sign.
### PrivValidatorJSON
The "priv_validator.json" file supports only the PrivValidatorUnencrypted type.
It unmarshals into PrivValidatorJSON, which is used as the default PrivValidator type.
It wraps the PrivValidatorUnencrypted and persists it to disk after every signature.
## Status
Accepted.
## Consequences
### Positive
- Cleaner separation of components enabling re-use.
### Negative
- More files - led to creation of new directory.
### Neutral
- FilePV will encrypt the private key, and the user must enter password to
decrypt key when process is started.
- LedgerPV uses a Ledger Nano S to handle all signing.
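For orientation, the in-process interface that both FilePV and SocketPV are expected to satisfy looks roughly like the sketch below. Method names such as `GetAddress` appear elsewhere in this diff, but the full signature set and the `Address` return type are approximations, not the exact declaration from the types package.

```go
// Approximate shape of types.PrivValidator as referenced above (sketch only).
type PrivValidator interface {
	GetAddress() Address
	GetPubKey() crypto.PubKey

	SignVote(chainID string, vote *Vote) error
	SignProposal(chainID string, proposal *Proposal) error
	SignHeartbeat(chainID string, heartbeat *Heartbeat) error
}
```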

View File

@ -11,26 +11,26 @@ Manual Deployments
It's relatively easy to setup a Tendermint cluster manually. The only It's relatively easy to setup a Tendermint cluster manually. The only
requirements for a particular Tendermint node are a private key for the requirements for a particular Tendermint node are a private key for the
validator, stored as ``priv_validator.json``, and a list of the public validator, stored as ``priv_validator.json``, a node key, stored as
keys of all validators, stored as ``genesis.json``. These files should ``node_key.json`` and a list of the public keys of all validators, stored as
be stored in ``~/.tendermint/config``, or wherever the ``$TMHOME`` variable ``genesis.json``. These files should be stored in ``~/.tendermint/config``, or
might be set to. wherever the ``$TMHOME`` variable might be set to.
Here are the steps to setting up a testnet manually: Here are the steps to setting up a testnet manually:
1) Provision nodes on your cloud provider of choice 1) Provision nodes on your cloud provider of choice
2) Install Tendermint and the application of interest on all nodes 2) Install Tendermint and the application of interest on all nodes
3) Generate a private key for each validator using 3) Generate a private key and a node key for each validator using
``tendermint gen_validator`` ``tendermint init``
4) Compile a list of public keys for each validator into a 4) Compile a list of public keys for each validator into a
``genesis.json`` file. ``genesis.json`` file and replace the existing file with it.
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node, 5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
where ``< peer addresses >`` is a comma separated list of the IP:PORT where ``< peer addresses >`` is a comma separated list of the IP:PORT
combination for each node. The default port for Tendermint is combination for each node. The default port for Tendermint is
``46656``. Thus, if the IP addresses of your nodes were ``46656``. Thus, if the IP addresses of your nodes were
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command ``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
would look like: would look like:
``tendermint node --p2p.persistent_peers=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``. ``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656,429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656,f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
After a few seconds, all the nodes should connect to each other and start After a few seconds, all the nodes should connect to each other and start
making blocks! For more information, see the Tendermint Networks section making blocks! For more information, see the Tendermint Networks section
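The ``persistent_peers`` value is simply a comma-separated list of ``ID@IP:PORT`` entries. A small, hypothetical Go helper that assembles such a string (the node IDs below are the sample values from the docs, and the helper name is illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// buildPersistentPeers joins node IDs and host:port pairs into the
// "ID@IP:PORT,ID@IP:PORT" form expected by --p2p.persistent_peers.
func buildPersistentPeers(ids, hosts []string) string {
	peers := make([]string, len(ids))
	for i := range ids {
		peers[i] = ids[i] + "@" + hosts[i]
	}
	return strings.Join(peers, ",")
}

func main() {
	ids := []string{
		"96663a3dd0d7b9d17d4c8211b191af259621c693",
		"429fcf25974313b95673f58d77eacdd434402665",
	}
	hosts := []string{"192.168.0.1:46656", "192.168.0.2:46656"}
	fmt.Println(buildPersistentPeers(ids, hosts))
}
```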

View File

@ -2,8 +2,9 @@
## Overview ## Overview
This is a quick start guide. If you have a vague idea about how Tendermint works This is a quick start guide. If you have a vague idea about how Tendermint
and want to get started right away, continue. Otherwise, [review the documentation](http://tendermint.readthedocs.io/en/master/) works and want to get started right away, continue. Otherwise, [review the
documentation](http://tendermint.readthedocs.io/en/master/).
## Install ## Install
@ -42,7 +43,7 @@ Confirm installation:
``` ```
$ tendermint version $ tendermint version
0.15.0-381fe19 0.18.0-XXXXXXX
``` ```
## Initialization ## Initialization
@ -117,7 +118,9 @@ where the value is returned in hex.
## Cluster of Nodes ## Cluster of Nodes
First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4. First create four Ubuntu cloud machines. The following was tested on Digital
Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP
addresses below as IP1, IP2, IP3, IP4.
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY): Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
@ -131,12 +134,16 @@ This will install `go` and other dependencies, get the Tendermint source code, t
Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence: Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
``` ```
tendermint node --home ./node1 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node2 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node3 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node4 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
``` ```
Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. Seeds can also be specified in the `config.toml`. See [this PR](https://github.com/tendermint/tendermint/pull/792) for more information about configuration options. Note that after the third node is started, blocks will start to stream in
because >2/3 of validators (defined in the `genesis.json`) have come online.
Seeds can also be specified in the `config.toml`. See [this
PR](https://github.com/tendermint/tendermint/pull/792) for more information
about configuration options.
Transactions can then be sent as covered in the single, local node example above. Transactions can then be sent as covered in the single, local node example above.

View File

@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO cd $GOPATH/src/$REPO
## build ## build
git checkout v0.17.0 git checkout v0.18.0
make get_tools make get_tools
make get_vendor_deps make get_vendor_deps
make install make install

View File

@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC",
"type" : "ed25519"
}
}

View File

@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95",
"type" : "ed25519"
}
}

View File

@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7",
"type" : "ed25519"
}
}

View File

@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57",
"type" : "ed25519"
}
}

View File

@ -81,9 +81,8 @@ Tendermint node as follows:
curl -s localhost:46657/status curl -s localhost:46657/status
The ``-s`` just silences ``curl``. For nicer output, pipe the result The ``-s`` just silences ``curl``. For nicer output, pipe the result into a
into a tool like `jq <https://stedolan.github.io/jq/>`__ or tool like `jq <https://stedolan.github.io/jq/>`__ or ``json_pp``.
`jsonpp <https://github.com/jmhodges/jsonpp>`__.
Now let's send some transactions to the kvstore. Now let's send some transactions to the kvstore.
@ -104,17 +103,23 @@ like:
"id": "", "id": "",
"result": { "result": {
"check_tx": { "check_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"deliver_tx": { "deliver_tx": {
"code": 0, "tags": [
"data": "", {
"log": "" "key": "YXBwLmNyZWF0b3I=",
"value": "amFl"
},
{
"key": "YXBwLmtleQ==",
"value": "YWJjZA=="
}
],
"fee": {}
}, },
"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39",
"height": 154 "height": 14
} }
} }
@ -134,20 +139,17 @@ The result should look like:
"id": "", "id": "",
"result": { "result": {
"response": { "response": {
"code": 0, "log": "exists",
"index": 0, "index": "-1",
"key": "", "key": "YWJjZA==",
"value": "61626364", "value": "YWJjZA=="
"proof": "",
"height": 0,
"log": "exists"
} }
} }
} }
Note the ``value`` in the result (``61626364``); this is the Note the ``value`` in the result (``YWJjZA==``); this is the
hex-encoding of the ASCII of ``abcd``. You can verify this in base64-encoding of the ASCII of ``abcd``. You can verify this in
a python 2 shell by running ``"61626364".decode('hex')`` or in python 3 shell by running ``import codecs; codecs.decode("61626364", 'hex').decode('ascii')``. Stay a python 2 shell by running ``"YWJjZA==".decode('base64')`` or in python 3 shell by running ``import codecs; codecs.decode(b"YWJjZA==", 'base64').decode('ascii')``. Stay
tuned for a future release that `makes this output more human-readable <https://github.com/tendermint/abci/issues/32>`__. tuned for a future release that `makes this output more human-readable <https://github.com/tendermint/abci/issues/32>`__.
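Since the responses are now base64-encoded, the same check can also be done in Go with the standard library; a minimal sketch:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// "YWJjZA==" is the base64 encoding of the ASCII string "abcd".
	v, err := base64.StdEncoding.DecodeString("YWJjZA==")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v)) // prints: abcd
}
```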
Now let's try setting a different key and value: Now let's try setting a different key and value:
@ -157,7 +159,7 @@ Now let's try setting a different key and value:
curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"' curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"'
Now if we query for ``name``, we should get ``satoshi``, or Now if we query for ``name``, we should get ``satoshi``, or
``7361746F736869`` in hex: ``c2F0b3NoaQ==`` in base64:
:: ::
@ -226,17 +228,15 @@ the number ``1``. If instead, we try to send a ``5``, we get an error:
"id": "", "id": "",
"result": { "result": {
"check_tx": { "check_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"deliver_tx": { "deliver_tx": {
"code": 3, "code": 2,
"data": "", "log": "Invalid nonce. Expected 1, got 5",
"log": "Invalid nonce. Expected 1, got 5" "fee": {}
}, },
"hash": "33B93DFF98749B0D6996A70F64071347060DC19C", "hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
"height": 38 "height": 34
} }
} }
@ -250,17 +250,13 @@ But if we send a ``1``, it works again:
"id": "", "id": "",
"result": { "result": {
"check_tx": { "check_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"deliver_tx": { "deliver_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D", "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
"height": 87 "height": 60
} }
} }

View File

@ -59,7 +59,7 @@ Next we replay all the messages from the WAL.
:: ::
I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:46657 module=rpc-server I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:46657 module=rpc-server
I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{pk: PubKeyEd25519{DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E177003C4D6FD66}, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:46656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:46657])}" I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:46656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:46657])}"
Next follows a standard block creation cycle, where we enter a new round, Next follows a standard block creation cycle, where we enter a new round,
propose a block, receive more than 2/3 of prevotes, then precommits and finally propose a block, receive more than 2/3 of prevotes, then precommits and finally

View File

@ -4,7 +4,7 @@ Install Tendermint
From Binary From Binary
----------- -----------
To download pre-built binaries, see the `Download page <https://tendermint.com/download>`__. To download pre-built binaries, see the `Download page <https://tendermint.com/downloads>`__.
From Source From Source
----------- -----------
@ -37,13 +37,13 @@ First, install ``dep``:
:: ::
cd $GOPATH/src/github.com/tendermint/tendermint
make get_tools make get_tools
Now we can fetch the correct versions of each dependency by running: Now we can fetch the correct versions of each dependency by running:
:: ::
cd $GOPATH/src/github.com/tendermint/tendermint
make get_vendor_deps make get_vendor_deps
make install make install
@ -96,6 +96,7 @@ If ``go get`` failing bothers you, fetch the code using ``git``:
mkdir -p $GOPATH/src/github.com/tendermint mkdir -p $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint cd $GOPATH/src/github.com/tendermint/tendermint
make get_tools
make get_vendor_deps make get_vendor_deps
make install make install

View File

@ -83,7 +83,7 @@ The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
```golang ```golang
type NodeInfo struct { type NodeInfo struct {
PubKey crypto.PubKey ID p2p.ID
Moniker string Moniker string
Network string Network string
RemoteAddr string RemoteAddr string
@ -95,7 +95,7 @@ type NodeInfo struct {
``` ```
The connection is disconnected if: The connection is disconnected if:
- `peer.NodeInfo.PubKey != peer.PubKey` - `peer.NodeInfo.ID` is not equal `peerConn.ID`
- `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision - `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision
- `peer.NodeInfo.Version` Major is not the same as ours - `peer.NodeInfo.Version` Major is not the same as ours
- `peer.NodeInfo.Version` Minor is not the same as ours - `peer.NodeInfo.Version` Minor is not the same as ours
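The version rule above (the version must be `X.X.X`, and Major and Minor must match ours) could be expressed roughly as follows. This is a hedged sketch of the described check, not the actual p2p code, and it skips the integer validation of each component.

```go
package main

import (
	"fmt"
	"strings"
)

// compatibleVersions applies the rule described above: the version string must
// have three components, and the Major and Minor components must match ours.
func compatibleVersions(ours, theirs string) bool {
	o := strings.SplitN(ours, ".", 3)
	t := strings.SplitN(theirs, ".", 3)
	if len(o) != 3 || len(t) != 3 {
		return false
	}
	return o[0] == t[0] && o[1] == t[1]
}

func main() {
	fmt.Println(compatibleVersions("0.19.0", "0.19.2")) // true: only Revision differs
	fmt.Println(compatibleVersions("0.19.0", "0.18.0")) // false: Minor differs
}
```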

View File

@ -62,6 +62,13 @@ such as the Web-of-Trust or Certificate Authorities. In our case, we can
use the blockchain itself as a certificate authority to ensure that we use the blockchain itself as a certificate authority to ensure that we
are connected to at least one validator. are connected to at least one validator.
Config
------
Authenticated encryption is enabled by default. If you wish to use another
authentication scheme or your peers are connected via VPN, you can turn it off
by setting ``auth_enc`` to ``false`` in the config file.
Additional Reading Additional Reading
------------------ ------------------

View File

@ -74,20 +74,17 @@ RPC server, for example:
curl http://localhost:46657/broadcast_tx_commit?tx=\"abcd\" curl http://localhost:46657/broadcast_tx_commit?tx=\"abcd\"
For handling responses, we recommend you `install the jsonpp
tool <http://jmhodges.github.io/jsonpp/>`__ to pretty print the JSON.
We can see the chain's status at the ``/status`` end-point: We can see the chain's status at the ``/status`` end-point:
:: ::
curl http://localhost:46657/status | jsonpp curl http://localhost:46657/status | json_pp
and the ``latest_app_hash`` in particular: and the ``latest_app_hash`` in particular:
:: ::
curl http://localhost:46657/status | jsonpp | grep app_hash curl http://localhost:46657/status | json_pp | grep latest_app_hash
Visit http://localhost:46657 in your browser to see the list of other Visit http://localhost:46657 in your browser to see the list of other
endpoints. Some take no arguments (like ``/status``), while others endpoints. Some take no arguments (like ``/status``), while others
@ -260,19 +257,19 @@ When ``tendermint init`` is run, both a ``genesis.json`` and
:: ::
{ {
"app_hash": "", "validators" : [
"chain_id": "test-chain-HZw6TB", {
"genesis_time": "0001-01-01T00:00:00.000Z", "pub_key" : {
"validators": [ "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
{ "type" : "AC26791624DE60"
"power": 10, },
"name": "", "power" : 10,
"pub_key": [ "name" : ""
1, }
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E" ],
] "app_hash" : "",
} "chain_id" : "test-chain-rDlYSN",
] "genesis_time" : "0001-01-01T00:00:00Z"
} }
And the ``priv_validator.json``: And the ``priv_validator.json``:
@ -280,20 +277,18 @@ And the ``priv_validator.json``:
:: ::
{ {
"address": "4F4D895F882A18E1D1FC608D102601DA8D3570E5", "last_step" : 0,
"last_height": 0, "last_round" : 0,
"last_round": 0, "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2",
"last_signature": null, "pub_key" : {
"last_signbytes": "", "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
"last_step": 0, "type" : "AC26791624DE60"
"priv_key": [ },
1, "last_height" : 0,
"F9FA3CD435BDAE54D0BCA8F1BC289D718C23D855C6DB21E8543F5E4F457E62805770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E" "priv_key" : {
], "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==",
"pub_key": [ "type" : "954568A3288910"
1, }
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
]
} }
The ``priv_validator.json`` actually contains a private key, and should The ``priv_validator.json`` actually contains a private key, and should
@ -334,14 +329,14 @@ For instance,
:: ::
tendermint node --p2p.seeds "1.2.3.4:46656,5.6.7.8:46656" tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:46656"
Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to
specify seeds for a running node to connect to: specify seeds for a running node to connect to:
:: ::
curl 'localhost:46657/dial_seeds?seeds=\["1.2.3.4:46656","5.6.7.8:46656"\]' curl 'localhost:46657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:46656"\]'
Note, if the peer-exchange protocol (PEX) is enabled (default), you should not Note, if the peer-exchange protocol (PEX) is enabled (default), you should not
normally need seeds after the first start. Peers will be gossipping about known normally need seeds after the first start. Peers will be gossipping about known
@ -355,8 +350,8 @@ core instance.
:: ::
tendermint node --p2p.persistent_peers "10.11.12.13:46656,10.11.12.14:46656" tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:46656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:46656"
curl 'localhost:46657/dial_peers?persistent=true&peers=\["1.2.3.4:46656","5.6.7.8:46656"\]' curl 'localhost:46657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:46656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:46656"\]'
Adding a Non-Validator Adding a Non-Validator
~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~
@ -387,20 +382,18 @@ Now we can update our genesis file. For instance, if the new
:: ::
{ {
"address": "AC379688105901436A34A65F185C115B8BB277A1", "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
"last_height": 0, "pub_key" : {
"last_round": 0, "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
"last_signature": null, "type" : "AC26791624DE60"
"last_signbytes": "", },
"last_step": 0, "priv_key" : {
"priv_key": [ "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
1, "type" : "954568A3288910"
"0D2ED337D748ADF79BE28559B9E59EBE1ABBA0BAFE6D65FCB9797985329B950C8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94" },
], "last_step" : 0,
"pub_key": [ "last_round" : 0,
1, "last_height" : 0
"8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
]
} }
then the new ``genesis.json`` will be: then the new ``genesis.json`` will be:
@ -408,27 +401,27 @@ then the new ``genesis.json`` will be:
:: ::
{ {
"app_hash": "", "validators" : [
"chain_id": "test-chain-HZw6TB", {
"genesis_time": "0001-01-01T00:00:00.000Z", "pub_key" : {
"validators": [ "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
{ "type" : "AC26791624DE60"
"power": 10, },
"name": "", "power" : 10,
"pub_key": [ "name" : ""
1, },
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E" {
] "pub_key" : {
}, "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
{ "type" : "AC26791624DE60"
"power": 10, },
"name": "", "power" : 10,
"pub_key": [ "name" : ""
1, }
"8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94" ],
] "app_hash" : "",
} "chain_id" : "test-chain-rDlYSN",
] "genesis_time" : "0001-01-01T00:00:00Z"
} }
Update the ``genesis.json`` in ``~/.tendermint/config``. Copy the genesis file Update the ``genesis.json`` in ``~/.tendermint/config``. Copy the genesis file

View File

@ -1,12 +1,11 @@
package evidence package evidence
import ( import (
"bytes"
"fmt" "fmt"
"reflect" "reflect"
"time" "time"
wire "github.com/tendermint/go-wire" "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
@ -16,7 +15,7 @@ import (
const ( const (
EvidenceChannel = byte(0x38) EvidenceChannel = byte(0x38)
maxEvidenceMessageSize = 1048576 // 1MB TODO make it configurable maxMsgSize = 1048576 // 1MB TODO make it configurable
broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often
) )
@ -68,7 +67,7 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
// the rest will be sent by the broadcastRoutine // the rest will be sent by the broadcastRoutine
evidences := evR.evpool.PriorityEvidence() evidences := evR.evpool.PriorityEvidence()
msg := &EvidenceListMessage{evidences} msg := &EvidenceListMessage{evidences}
success := peer.Send(EvidenceChannel, struct{ EvidenceMessage }{msg}) success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
if !success { if !success {
// TODO: remove peer ? // TODO: remove peer ?
} }
@ -82,7 +81,7 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Receive implements Reactor. // Receive implements Reactor.
// It adds any received evidence to the evpool. // It adds any received evidence to the evpool.
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
evR.Switch.StopPeerForError(src, err) evR.Switch.StopPeerForError(src, err)
@ -119,7 +118,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
case evidence := <-evR.evpool.EvidenceChan(): case evidence := <-evR.evpool.EvidenceChan():
// broadcast some new evidence // broadcast some new evidence
msg := &EvidenceListMessage{[]types.Evidence{evidence}} msg := &EvidenceListMessage{[]types.Evidence{evidence}}
evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg}) evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
// TODO: Broadcast runs asynchronously, so this should wait on the successChan // TODO: Broadcast runs asynchronously, so this should wait on the successChan
// in another routine before marking to be proper. // in another routine before marking to be proper.
@ -127,7 +126,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
case <-ticker.C: case <-ticker.C:
// broadcast all pending evidence // broadcast all pending evidence
msg := &EvidenceListMessage{evR.evpool.PendingEvidence()} msg := &EvidenceListMessage{evR.evpool.PendingEvidence()}
evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg}) evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
case <-evR.Quit(): case <-evR.Quit():
return return
} }
@ -137,24 +136,22 @@ func (evR *EvidenceReactor) broadcastRoutine() {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeEvidence = byte(0x01)
)
// EvidenceMessage is a message sent or received by the EvidenceReactor. // EvidenceMessage is a message sent or received by the EvidenceReactor.
type EvidenceMessage interface{} type EvidenceMessage interface{}
var _ = wire.RegisterInterface( func RegisterEvidenceMessages(cdc *amino.Codec) {
struct{ EvidenceMessage }{}, cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
wire.ConcreteType{&EvidenceListMessage{}, msgTypeEvidence}, cdc.RegisterConcrete(&EvidenceListMessage{},
) "tendermint/evidence/EvidenceListMessage", nil)
}
// DecodeMessage decodes a byte-array into a EvidenceMessage. // DecodeMessage decodes a byte-array into a EvidenceMessage.
func DecodeMessage(bz []byte) (msgType byte, msg EvidenceMessage, err error) { func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := new(int) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ EvidenceMessage }{}, r, maxEvidenceMessageSize, n, &err).(struct{ EvidenceMessage }).EvidenceMessage }
err = cdc.UnmarshalBinaryBare(bz, &msg)
return return
} }

View File

@ -3,7 +3,6 @@ package evidence
import ( import (
"fmt" "fmt"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
) )
@ -104,7 +103,10 @@ func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evi
val := iter.Value() val := iter.Value()
var ei EvidenceInfo var ei EvidenceInfo
wire.ReadBinaryBytes(val, &ei) err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
}
evidence = append(evidence, ei.Evidence) evidence = append(evidence, ei.Evidence)
} }
return evidence return evidence
@ -119,7 +121,10 @@ func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo
return nil return nil
} }
var ei EvidenceInfo var ei EvidenceInfo
wire.ReadBinaryBytes(val, &ei) err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
}
return &ei return &ei
} }
@ -137,7 +142,7 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int
Priority: priority, Priority: priority,
Evidence: evidence, Evidence: evidence,
} }
eiBytes := wire.BinaryBytes(ei) eiBytes := cdc.MustMarshalBinaryBare(ei)
// add it to the store // add it to the store
key := keyOutqueue(evidence, priority) key := keyOutqueue(evidence, priority)
@ -171,7 +176,7 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
ei.Committed = true ei.Committed = true
lookupKey := keyLookup(evidence) lookupKey := keyLookup(evidence)
store.db.SetSync(lookupKey, wire.BinaryBytes(ei)) store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei))
} }
//--------------------------------------------------- //---------------------------------------------------
@ -181,6 +186,9 @@ func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInf
key := keyLookup(evidence) key := keyLookup(evidence)
var ei EvidenceInfo var ei EvidenceInfo
b := store.db.Get(key) b := store.db.Get(key)
wire.ReadBinaryBytes(b, &ei) err := cdc.UnmarshalBinaryBare(b, &ei)
if err != nil {
panic(err)
}
return ei return ei
} }

View File

@ -4,7 +4,6 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
) )
@ -108,15 +107,3 @@ func TestStorePriority(t *testing.T) {
assert.Equal(ev, cases[i].ev) assert.Equal(ev, cases[i].ev)
} }
} }
//-------------------------------------------
const (
evidenceTypeMockGood = byte(0x01)
evidenceTypeMockBad = byte(0x02)
)
var _ = wire.RegisterInterface(
struct{ types.Evidence }{},
wire.ConcreteType{types.MockGoodEvidence{}, evidenceTypeMockGood},
wire.ConcreteType{types.MockBadEvidence{}, evidenceTypeMockBad},
)

evidence/wire.go Normal file (+25 lines)
View File

@ -0,0 +1,25 @@
package evidence
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
func init() {
RegisterEvidenceMessages(cdc)
crypto.RegisterAmino(cdc)
types.RegisterEvidences(cdc)
RegisterMockEvidences(cdc) // For testing
}
//-------------------------------------------
func RegisterMockEvidences(cdc *amino.Codec) {
cdc.RegisterConcrete(types.MockGoodEvidence{},
"tendermint/MockGoodEvidence", nil)
cdc.RegisterConcrete(types.MockBadEvidence{},
"tendermint/MockBadEvidence", nil)
}
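With this codec in place, `DecodeMessage` enforces the 1MB `maxMsgSize` bound before any amino decoding happens. A hedged test-style sketch, assuming it lives alongside the definitions above in the evidence package:

```go
package evidence

import "testing"

// Sketch: inputs larger than maxMsgSize are rejected before amino decoding.
func TestDecodeMessageRejectsOversized(t *testing.T) {
	bz := make([]byte, maxMsgSize+1)
	if _, err := DecodeMessage(bz); err == nil {
		t.Fatal("expected an error for an oversized evidence message")
	}
}
```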

View File

@ -93,7 +93,7 @@ func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return p.node.Commit(&status.LatestBlockHeight) return p.node.Commit(&status.SyncInfo.LatestBlockHeight)
} }
// CommitFromResult ... // CommitFromResult ...

View File

@ -1,13 +1,11 @@
package files package files
import ( import (
"encoding/json" "io/ioutil"
"os" "os"
"github.com/pkg/errors" "github.com/pkg/errors"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors" liteErr "github.com/tendermint/tendermint/lite/errors"
) )
@ -19,7 +17,7 @@ const (
MaxFullCommitSize = 1024 * 1024 MaxFullCommitSize = 1024 * 1024
) )
// SaveFullCommit exports the seed in binary / go-wire style // SaveFullCommit exports the seed in binary / go-amino style
func SaveFullCommit(fc lite.FullCommit, path string) error { func SaveFullCommit(fc lite.FullCommit, path string) error {
f, err := os.Create(path) f, err := os.Create(path)
if err != nil { if err != nil {
@ -27,9 +25,11 @@ func SaveFullCommit(fc lite.FullCommit, path string) error {
} }
defer f.Close() defer f.Close()
var n int _, err = cdc.MarshalBinaryWriter(f, fc)
wire.WriteBinary(fc, f, &n, &err) if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
}
return nil
} }
// SaveFullCommitJSON exports the seed in a json format // SaveFullCommitJSON exports the seed in a json format
@ -39,9 +39,15 @@ func SaveFullCommitJSON(fc lite.FullCommit, path string) error {
return errors.WithStack(err) return errors.WithStack(err)
} }
defer f.Close() defer f.Close()
stream := json.NewEncoder(f) bz, err := cdc.MarshalJSON(fc)
err = stream.Encode(fc) if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
}
_, err = f.Write(bz)
if err != nil {
return errors.WithStack(err)
}
return nil
} }
// LoadFullCommit loads the full commit from the file system. // LoadFullCommit loads the full commit from the file system.
@ -56,9 +62,11 @@ func LoadFullCommit(path string) (lite.FullCommit, error) {
} }
defer f.Close() defer f.Close()
var n int _, err = cdc.UnmarshalBinaryReader(f, &fc, 0)
wire.ReadBinaryPtr(&fc, f, MaxFullCommitSize, &n, &err) if err != nil {
return fc, errors.WithStack(err) return fc, errors.WithStack(err)
}
return fc, nil
} }
// LoadFullCommitJSON loads the commit from the file system in JSON format. // LoadFullCommitJSON loads the commit from the file system in JSON format.
@ -73,7 +81,13 @@ func LoadFullCommitJSON(path string) (lite.FullCommit, error) {
} }
defer f.Close() defer f.Close()
stream := json.NewDecoder(f) bz, err := ioutil.ReadAll(f)
err = stream.Decode(&fc) if err != nil {
return fc, errors.WithStack(err) return fc, errors.WithStack(err)
}
err = cdc.UnmarshalJSON(bz, &fc)
if err != nil {
return fc, errors.WithStack(err)
}
return fc, nil
} }

lite/files/wire.go Normal file (+12 lines)
View File

@ -0,0 +1,12 @@
package files
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}

View File

@ -23,7 +23,7 @@ type ValKeys []crypto.PrivKey
func GenValKeys(n int) ValKeys { func GenValKeys(n int) ValKeys {
res := make(ValKeys, n) res := make(ValKeys, n)
for i := range res { for i := range res {
res[i] = crypto.GenPrivKeyEd25519().Wrap() res[i] = crypto.GenPrivKeyEd25519()
} }
return res return res
} }
@ -32,7 +32,7 @@ func GenValKeys(n int) ValKeys {
func (v ValKeys) Change(i int) ValKeys { func (v ValKeys) Change(i int) ValKeys {
res := make(ValKeys, len(v)) res := make(ValKeys, len(v))
copy(res, v) copy(res, v)
res[i] = crypto.GenPrivKeyEd25519().Wrap() res[i] = crypto.GenPrivKeyEd25519()
return res return res
} }
@ -46,7 +46,7 @@ func (v ValKeys) Extend(n int) ValKeys {
func GenSecpValKeys(n int) ValKeys { func GenSecpValKeys(n int) ValKeys {
res := make(ValKeys, n) res := make(ValKeys, n)
for i := range res { for i := range res {
res[i] = crypto.GenPrivKeySecp256k1().Wrap() res[i] = crypto.GenPrivKeySecp256k1()
} }
return res return res
} }

View File

@ -3,10 +3,12 @@ package proxy
import ( import (
"net/http" "net/http"
"github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
rpcclient "github.com/tendermint/tendermint/rpc/client" rpcclient "github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/core" "github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpc "github.com/tendermint/tendermint/rpc/lib/server" rpc "github.com/tendermint/tendermint/rpc/lib/server"
) )
@ -23,13 +25,15 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error
return err return err
} }
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
r := RPCRoutes(c) r := RPCRoutes(c)
// build the handler... // build the handler...
mux := http.NewServeMux() mux := http.NewServeMux()
rpc.RegisterRPCFuncs(mux, r, logger) rpc.RegisterRPCFuncs(mux, r, cdc, logger)
wm := rpc.NewWebsocketManager(r, rpc.EventSubscriber(c)) wm := rpc.NewWebsocketManager(r, cdc, rpc.EventSubscriber(c))
wm.SetLogger(logger) wm.SetLogger(logger)
core.SetLogger(logger) core.SetLogger(logger)
mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)

View File

@ -146,7 +146,7 @@ func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) {
// } // }
// // check to validate it if possible, and drop if not valid // // check to validate it if possible, and drop if not valid
// switch t := tm.Unwrap().(type) { // switch t := tm.(type) {
// case types.EventDataNewBlockHeader: // case types.EventDataNewBlockHeader:
// err := verifyHeader(s.client, t.Header) // err := verifyHeader(s.client, t.Header)
// if err != nil { // if err != nil {

View File

@ -1,13 +1,12 @@
package mempool package mempool
import ( import (
"bytes"
"fmt" "fmt"
"reflect" "reflect"
"time" "time"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
wire "github.com/tendermint/go-wire" "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/clist" "github.com/tendermint/tmlibs/clist"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -19,7 +18,7 @@ import (
const ( const (
MempoolChannel = byte(0x30) MempoolChannel = byte(0x30)
maxMempoolMessageSize = 1048576 // 1MB TODO make it configurable maxMsgSize = 1048576 // 1MB TODO make it configurable
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
) )
@ -71,7 +70,7 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Receive implements Reactor. // Receive implements Reactor.
// It adds any received transactions to the mempool. // It adds any received transactions to the mempool.
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
memR.Switch.StopPeerForError(src, err) memR.Switch.StopPeerForError(src, err)
@ -137,7 +136,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
} }
// send memTx // send memTx
msg := &TxMessage{Tx: memTx.tx} msg := &TxMessage{Tx: memTx.tx}
success := peer.Send(MempoolChannel, struct{ MempoolMessage }{msg}) success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
if !success { if !success {
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
continue continue
@ -158,24 +157,21 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeTx = byte(0x01)
)
// MempoolMessage is a message sent or received by the MempoolReactor. // MempoolMessage is a message sent or received by the MempoolReactor.
type MempoolMessage interface{} type MempoolMessage interface{}
var _ = wire.RegisterInterface( func RegisterMempoolMessages(cdc *amino.Codec) {
struct{ MempoolMessage }{}, cdc.RegisterInterface((*MempoolMessage)(nil), nil)
wire.ConcreteType{&TxMessage{}, msgTypeTx}, cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
) }
// DecodeMessage decodes a byte-array into a MempoolMessage. // DecodeMessage decodes a byte-array into a MempoolMessage.
func DecodeMessage(bz []byte) (msgType byte, msg MempoolMessage, err error) { func DecodeMessage(bz []byte) (msg MempoolMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := new(int) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ MempoolMessage }{}, r, maxMempoolMessageSize, n, &err).(struct{ MempoolMessage }).MempoolMessage }
err = cdc.UnmarshalBinaryBare(bz, &msg)
return return
} }

mempool/wire.go Normal file (+11 lines)
View File

@ -0,0 +1,11 @@
package mempool
import (
"github.com/tendermint/go-amino"
)
var cdc = amino.NewCodec()
func init() {
RegisterMempoolMessages(cdc)
}
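As a quick check of the registration above, a `TxMessage` marshalled with the package codec can be recovered through `DecodeMessage`, which mirrors what the reactor does over the wire. A hedged sketch in the mempool package; the `Tx` field name comes from the reactor diff, the rest (test name, sample payload) is assumed:

```go
package mempool

import (
	"bytes"
	"testing"

	"github.com/tendermint/tendermint/types"
)

// Sketch: a TxMessage marshalled with the package codec comes back as a
// *TxMessage when decoded through DecodeMessage.
func TestTxMessageRoundTrip(t *testing.T) {
	msg := &TxMessage{Tx: types.Tx("key=value")}
	bz := cdc.MustMarshalBinaryBare(msg)

	decoded, err := DecodeMessage(bz)
	if err != nil {
		t.Fatal(err)
	}
	got, ok := decoded.(*TxMessage)
	if !ok {
		t.Fatalf("expected *TxMessage, got %T", decoded)
	}
	if !bytes.Equal(got.Tx, msg.Tx) {
		t.Fatal("tx bytes do not match after round trip")
	}
}
```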

View File

@ -2,15 +2,14 @@ package node
import ( import (
"bytes" "bytes"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
amino "github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto" crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -25,6 +24,7 @@ import (
"github.com/tendermint/tendermint/p2p/trust" "github.com/tendermint/tendermint/p2p/trust"
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core" rpccore "github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
grpccore "github.com/tendermint/tendermint/rpc/grpc" grpccore "github.com/tendermint/tendermint/rpc/grpc"
rpc "github.com/tendermint/tendermint/rpc/lib" rpc "github.com/tendermint/tendermint/rpc/lib"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server" rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
@ -33,7 +33,7 @@ import (
"github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
priv_val "github.com/tendermint/tendermint/types/priv_validator" pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tendermint/version" "github.com/tendermint/tendermint/version"
_ "net/http/pprof" _ "net/http/pprof"
@ -78,7 +78,7 @@ type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
// It implements NodeProvider. // It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
return NewNode(config, return NewNode(config,
types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()), pvm.LoadOrGenFilePV(config.PrivValidatorFile()),
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config), DefaultGenesisDocProviderFunc(config),
DefaultDBProvider, DefaultDBProvider,
@ -179,8 +179,8 @@ func NewNode(config *cfg.Config,
// TODO: persist this key so external signer // TODO: persist this key so external signer
// can actually authenticate us // can actually authenticate us
privKey = crypto.GenPrivKeyEd25519() privKey = crypto.GenPrivKeyEd25519()
pvsc = priv_val.NewSocketClient( pvsc = pvm.NewSocketPV(
logger.With("module", "priv_val"), logger.With("module", "pvm"),
config.PrivValidatorListenAddr, config.PrivValidatorListenAddr,
privKey, privKey,
) )
@ -405,7 +405,7 @@ func (n *Node) OnStart() error {
} }
n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile()) n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile())
nodeInfo := n.makeNodeInfo(nodeKey.PubKey()) nodeInfo := n.makeNodeInfo(nodeKey.ID())
n.sw.SetNodeInfo(nodeInfo) n.sw.SetNodeInfo(nodeInfo)
n.sw.SetNodeKey(nodeKey) n.sw.SetNodeKey(nodeKey)
@ -448,7 +448,7 @@ func (n *Node) OnStop() {
n.eventBus.Stop() n.eventBus.Stop()
n.indexerService.Stop() n.indexerService.Stop()
if pvsc, ok := n.privValidator.(*priv_val.SocketClient); ok { if pvsc, ok := n.privValidator.(*pvm.SocketPV); ok {
if err := pvsc.Stop(); err != nil { if err := pvsc.Stop(); err != nil {
n.Logger.Error("Error stopping priv validator socket client", "err", err) n.Logger.Error("Error stopping priv validator socket client", "err", err)
} }
@ -492,6 +492,8 @@ func (n *Node) ConfigureRPC() {
func (n *Node) startRPC() ([]net.Listener, error) { func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC() n.ConfigureRPC()
listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ") listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
coreCodec := amino.NewCodec()
ctypes.RegisterAmino(coreCodec)
if n.config.RPC.Unsafe { if n.config.RPC.Unsafe {
rpccore.AddUnsafeRoutes() rpccore.AddUnsafeRoutes()
@ -502,10 +504,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
for i, listenAddr := range listenAddrs { for i, listenAddr := range listenAddrs {
mux := http.NewServeMux() mux := http.NewServeMux()
rpcLogger := n.Logger.With("module", "rpc-server") rpcLogger := n.Logger.With("module", "rpc-server")
wm := rpcserver.NewWebsocketManager(rpccore.Routes, rpcserver.EventSubscriber(n.eventBus)) wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
wm.SetLogger(rpcLogger.With("protocol", "websocket")) wm.SetLogger(rpcLogger.With("protocol", "websocket"))
mux.HandleFunc("/websocket", wm.WebsocketHandler) mux.HandleFunc("/websocket", wm.WebsocketHandler)
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger) listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
if err != nil { if err != nil {
return nil, err return nil, err
@ -577,13 +579,13 @@ func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp return n.proxyApp
} }
func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo { func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
txIndexerStatus := "on" txIndexerStatus := "on"
if _, ok := n.txIndexer.(*null.TxIndex); ok { if _, ok := n.txIndexer.(*null.TxIndex); ok {
txIndexerStatus = "off" txIndexerStatus = "off"
} }
nodeInfo := p2p.NodeInfo{ nodeInfo := p2p.NodeInfo{
PubKey: pubKey, ID: nodeID,
Network: n.genesisDoc.ChainID, Network: n.genesisDoc.ChainID,
Version: version.Version, Version: version.Version,
Channels: []byte{ Channels: []byte{
@ -594,7 +596,7 @@ func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo {
}, },
Moniker: n.config.Moniker, Moniker: n.config.Moniker,
Other: []string{ Other: []string{
cmn.Fmt("wire_version=%v", wire.Version), cmn.Fmt("amino_version=%v", amino.Version),
cmn.Fmt("p2p_version=%v", p2p.Version), cmn.Fmt("p2p_version=%v", p2p.Version),
cmn.Fmt("consensus_version=%v", cs.Version), cmn.Fmt("consensus_version=%v", cs.Version),
cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version), cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
@ -641,7 +643,7 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
return nil, errors.New("Genesis doc not found") return nil, errors.New("Genesis doc not found")
} }
var genDoc *types.GenesisDoc var genDoc *types.GenesisDoc
err := json.Unmarshal(bytes, &genDoc) err := cdc.UnmarshalJSON(bytes, &genDoc)
if err != nil { if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes)) cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
} }
@ -650,7 +652,7 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
// panics if failed to marshal the given genesis document // panics if failed to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
bytes, err := json.Marshal(genDoc) bytes, err := cdc.MarshalJSON(genDoc)
if err != nil { if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
} }

12
node/wire.go Normal file
View File

@ -0,0 +1,12 @@
package node
import (
amino "github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}
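
As a side note, a hedged sketch (not from this commit; the doc struct and its fields are made up) of why node/wire.go registers the crypto types: the genesis doc and node key carry crypto.PubKey interface values, so cdc.MarshalJSON/UnmarshalJSON only work once crypto.RegisterAmino has been called on the codec.

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
	crypto "github.com/tendermint/go-crypto"
)

// Hypothetical stand-in for a document carrying a crypto.PubKey interface
// field, as the genesis doc does through its validators.
type doc struct {
	ChainID string        `json:"chain_id"`
	PubKey  crypto.PubKey `json:"pub_key"`
}

func main() {
	cdc := amino.NewCodec()
	crypto.RegisterAmino(cdc) // same registration as node/wire.go above

	d := doc{ChainID: "test-chain", PubKey: crypto.GenPrivKeyEd25519().PubKey()}

	bz, err := cdc.MarshalJSON(d) // as saveGenesisDoc does
	if err != nil {
		panic(err)
	}

	var d2 doc
	if err := cdc.UnmarshalJSON(bz, &d2); err != nil { // as loadGenesisDoc does
		panic(err)
	}
	fmt.Println(string(bz))
	fmt.Println(d2.ChainID)
}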

View File

@ -7,17 +7,21 @@ import (
"io" "io"
"math" "math"
"net" "net"
"reflect"
"sync/atomic" "sync/atomic"
"time" "time"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate" flow "github.com/tendermint/tmlibs/flowrate"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
const ( const (
numBatchMsgPackets = 10 maxPacketMsgPayloadSizeDefault = 1024 // NOTE: Must be below 16,384 bytes for 14 below.
maxPacketMsgOverheadSize = 14 // NOTE: See connection_test for derivation.
numBatchPacketMsgs = 10
minReadBufferSize = 1024 minReadBufferSize = 1024
minWriteBufferSize = 65536 minWriteBufferSize = 65536
updateStats = 2 * time.Second updateStats = 2 * time.Second
@ -52,35 +56,34 @@ The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection. initialization of the connection.
There are two methods for sending messages: There are two methods for sending messages:
func (m MConnection) Send(chID byte, msg interface{}) bool {} func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
func (m MConnection) TrySend(chID byte, msg interface{}) bool {} func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {}
`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued `Send(chID, msgBytes)` is a blocking call that waits until `msg` is
for the channel with the given id byte `chID`, or until the request times out. successfully queued for the channel with the given id byte `chID`, or until the
The message `msg` is serialized using the `tendermint/wire` submodule's request times out. The message `msg` is serialized using Go-Amino.
`WriteBinary()` reflection routine.
`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
queue is full. channel's queue is full.
Inbound message bytes are handled with an onReceive callback function. Inbound message bytes are handled with an onReceive callback function.
*/ */
type MConnection struct { type MConnection struct {
cmn.BaseService cmn.BaseService
conn net.Conn conn net.Conn
bufReader *bufio.Reader bufConnReader *bufio.Reader
bufWriter *bufio.Writer bufConnWriter *bufio.Writer
sendMonitor *flow.Monitor sendMonitor *flow.Monitor
recvMonitor *flow.Monitor recvMonitor *flow.Monitor
send chan struct{} send chan struct{}
pong chan struct{} pong chan struct{}
channels []*Channel channels []*Channel
channelsIdx map[byte]*Channel channelsIdx map[byte]*Channel
onReceive receiveCbFunc onReceive receiveCbFunc
onError errorCbFunc onError errorCbFunc
errored uint32 errored uint32
config *MConnConfig config *MConnConfig
quit chan struct{} quit chan struct{}
flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled. flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
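
To make the API change described above concrete, a small self-contained sketch (the sender interface and exampleMsg type are illustrative stand-ins, not code from this commit): callers now serialize with their own amino codec and hand raw bytes to Send/TrySend, instead of passing interface values for the connection to encode.

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// sender is a stand-in for the new MConnection/Peer sending surface.
type sender interface {
	Send(chID byte, msgBytes []byte) bool
}

// printSender just prints what would be queued on the wire.
type printSender struct{}

func (printSender) Send(chID byte, msgBytes []byte) bool {
	fmt.Printf("channel %X <- %X\n", chID, msgBytes)
	return true
}

// exampleMsg is an illustrative reactor message type.
type exampleMsg struct {
	Seq int64
}

func main() {
	cdc := amino.NewCodec()
	cdc.RegisterConcrete(exampleMsg{}, "example/exampleMsg", nil)

	var s sender = printSender{}
	// Serialize with the reactor's codec, then queue the raw bytes on channel 0x01.
	s.Send(0x01, cdc.MustMarshalBinaryBare(exampleMsg{Seq: 7}))
}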
@ -101,7 +104,7 @@ type MConnConfig struct {
RecvRate int64 `mapstructure:"recv_rate"` RecvRate int64 `mapstructure:"recv_rate"`
// Maximum payload size // Maximum payload size
MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"` MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Interval to flush writes (throttled) // Interval to flush writes (throttled)
FlushThrottle time.Duration `mapstructure:"flush_throttle"` FlushThrottle time.Duration `mapstructure:"flush_throttle"`
@ -113,8 +116,8 @@ type MConnConfig struct {
PongTimeout time.Duration `mapstructure:"pong_timeout"` PongTimeout time.Duration `mapstructure:"pong_timeout"`
} }
func (cfg *MConnConfig) maxMsgPacketTotalSize() int { func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
return cfg.MaxMsgPacketPayloadSize + maxMsgPacketOverheadSize return cfg.MaxPacketMsgPayloadSize + maxPacketMsgOverheadSize
} }
// DefaultMConnConfig returns the default config. // DefaultMConnConfig returns the default config.
@ -122,7 +125,7 @@ func DefaultMConnConfig() *MConnConfig {
return &MConnConfig{ return &MConnConfig{
SendRate: defaultSendRate, SendRate: defaultSendRate,
RecvRate: defaultRecvRate, RecvRate: defaultRecvRate,
MaxMsgPacketPayloadSize: defaultMaxMsgPacketPayloadSize, MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
FlushThrottle: defaultFlushThrottle, FlushThrottle: defaultFlushThrottle,
PingInterval: defaultPingInterval, PingInterval: defaultPingInterval,
PongTimeout: defaultPongTimeout, PongTimeout: defaultPongTimeout,
@ -146,16 +149,16 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
} }
mconn := &MConnection{ mconn := &MConnection{
conn: conn, conn: conn,
bufReader: bufio.NewReaderSize(conn, minReadBufferSize), bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
bufWriter: bufio.NewWriterSize(conn, minWriteBufferSize), bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
sendMonitor: flow.New(0, 0), sendMonitor: flow.New(0, 0),
recvMonitor: flow.New(0, 0), recvMonitor: flow.New(0, 0),
send: make(chan struct{}, 1), send: make(chan struct{}, 1),
pong: make(chan struct{}, 1), pong: make(chan struct{}, 1),
onReceive: onReceive, onReceive: onReceive,
onError: onError, onError: onError,
config: config, config: config,
} }
// Create channels // Create channels
@ -220,7 +223,7 @@ func (c *MConnection) String() string {
func (c *MConnection) flush() { func (c *MConnection) flush() {
c.Logger.Debug("Flush", "conn", c) c.Logger.Debug("Flush", "conn", c)
err := c.bufWriter.Flush() err := c.bufConnWriter.Flush()
if err != nil { if err != nil {
c.Logger.Error("MConnection flush failed", "err", err) c.Logger.Error("MConnection flush failed", "err", err)
} }
@ -229,7 +232,7 @@ func (c *MConnection) flush() {
// Catch panics, usually caused by remote disconnects. // Catch panics, usually caused by remote disconnects.
func (c *MConnection) _recover() { func (c *MConnection) _recover() {
if r := recover(); r != nil { if r := recover(); r != nil {
err := cmn.ErrorWrap(r, "recovered from panic") err := cmn.ErrorWrap(r, "recovered panic in MConnection")
c.stopForError(err) c.stopForError(err)
} }
} }
@ -244,12 +247,12 @@ func (c *MConnection) stopForError(r interface{}) {
} }
// Queues a message to be sent to channel. // Queues a message to be sent to channel.
func (c *MConnection) Send(chID byte, msg interface{}) bool { func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
if !c.IsRunning() { if !c.IsRunning() {
return false return false
} }
c.Logger.Debug("Send", "channel", chID, "conn", c, "msg", msg) //, "bytes", wire.BinaryBytes(msg)) c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
// Send message to channel. // Send message to channel.
channel, ok := c.channelsIdx[chID] channel, ok := c.channelsIdx[chID]
@ -258,7 +261,7 @@ func (c *MConnection) Send(chID byte, msg interface{}) bool {
return false return false
} }
success := channel.sendBytes(wire.BinaryBytes(msg)) success := channel.sendBytes(msgBytes)
if success { if success {
// Wake up sendRoutine if necessary // Wake up sendRoutine if necessary
select { select {
@ -266,19 +269,19 @@ func (c *MConnection) Send(chID byte, msg interface{}) bool {
default: default:
} }
} else { } else {
c.Logger.Error("Send failed", "channel", chID, "conn", c, "msg", msg) c.Logger.Error("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
} }
return success return success
} }
// Queues a message to be sent to channel. // Queues a message to be sent to channel.
// Nonblocking, returns true if successful. // Nonblocking, returns true if successful.
func (c *MConnection) TrySend(chID byte, msg interface{}) bool { func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
if !c.IsRunning() { if !c.IsRunning() {
return false return false
} }
c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msg", msg) c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
// Send message to channel. // Send message to channel.
channel, ok := c.channelsIdx[chID] channel, ok := c.channelsIdx[chID]
@ -287,7 +290,7 @@ func (c *MConnection) TrySend(chID byte, msg interface{}) bool {
return false return false
} }
ok = channel.trySendBytes(wire.BinaryBytes(msg)) ok = channel.trySendBytes(msgBytes)
if ok { if ok {
// Wake up sendRoutine if necessary // Wake up sendRoutine if necessary
select { select {
@ -320,12 +323,13 @@ func (c *MConnection) sendRoutine() {
FOR_LOOP: FOR_LOOP:
for { for {
var n int var _n int64
var err error var err error
SELECTION:
select { select {
case <-c.flushTimer.Ch: case <-c.flushTimer.Ch:
// NOTE: flushTimer.Set() must be called every time // NOTE: flushTimer.Set() must be called every time
// something is written to .bufWriter. // something is written to .bufConnWriter.
c.flush() c.flush()
case <-c.chStatsTimer.Chan(): case <-c.chStatsTimer.Chan():
for _, channel := range c.channels { for _, channel := range c.channels {
@ -333,8 +337,11 @@ FOR_LOOP:
} }
case <-c.pingTimer.Chan(): case <-c.pingTimer.Chan():
c.Logger.Debug("Send Ping") c.Logger.Debug("Send Ping")
wire.WriteByte(packetTypePing, c.bufWriter, &n, &err) _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{})
c.sendMonitor.Update(int(n)) if err != nil {
break SELECTION
}
c.sendMonitor.Update(int(_n))
c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() { c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
select { select {
@ -352,14 +359,17 @@ FOR_LOOP:
} }
case <-c.pong: case <-c.pong:
c.Logger.Debug("Send Pong") c.Logger.Debug("Send Pong")
wire.WriteByte(packetTypePong, c.bufWriter, &n, &err) _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{})
c.sendMonitor.Update(int(n)) if err != nil {
break SELECTION
}
c.sendMonitor.Update(int(_n))
c.flush() c.flush()
case <-c.quit: case <-c.quit:
break FOR_LOOP break FOR_LOOP
case <-c.send: case <-c.send:
// Send some msgPackets // Send some PacketMsgs
eof := c.sendSomeMsgPackets() eof := c.sendSomePacketMsgs()
if !eof { if !eof {
// Keep sendRoutine awake. // Keep sendRoutine awake.
select { select {
@ -385,15 +395,15 @@ FOR_LOOP:
// Returns true if messages from channels were exhausted. // Returns true if messages from channels were exhausted.
// Blocks in accordance to .sendMonitor throttling. // Blocks in accordance to .sendMonitor throttling.
func (c *MConnection) sendSomeMsgPackets() bool { func (c *MConnection) sendSomePacketMsgs() bool {
// Block until .sendMonitor says we can write. // Block until .sendMonitor says we can write.
// Once we're ready we send more than we asked for, // Once we're ready we send more than we asked for,
// but amortized it should even out. // but amortized it should even out.
c.sendMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.SendRate), true) c.sendMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
// Now send some msgPackets. // Now send some PacketMsgs.
for i := 0; i < numBatchMsgPackets; i++ { for i := 0; i < numBatchPacketMsgs; i++ {
if c.sendMsgPacket() { if c.sendPacketMsg() {
return true return true
} }
} }
@ -401,8 +411,8 @@ func (c *MConnection) sendSomeMsgPackets() bool {
} }
// Returns true if messages from channels were exhausted. // Returns true if messages from channels were exhausted.
func (c *MConnection) sendMsgPacket() bool { func (c *MConnection) sendPacketMsg() bool {
// Choose a channel to create a msgPacket from. // Choose a channel to create a PacketMsg from.
// The chosen channel will be the one whose recentlySent/priority is the least. // The chosen channel will be the one whose recentlySent/priority is the least.
var leastRatio float32 = math.MaxFloat32 var leastRatio float32 = math.MaxFloat32
var leastChannel *Channel var leastChannel *Channel
@ -425,19 +435,19 @@ func (c *MConnection) sendMsgPacket() bool {
} }
// c.Logger.Info("Found a msgPacket to send") // c.Logger.Info("Found a msgPacket to send")
// Make & send a msgPacket from this channel // Make & send a PacketMsg from this channel
n, err := leastChannel.writeMsgPacketTo(c.bufWriter) _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
if err != nil { if err != nil {
c.Logger.Error("Failed to write msgPacket", "err", err) c.Logger.Error("Failed to write PacketMsg", "err", err)
c.stopForError(err) c.stopForError(err)
return true return true
} }
c.sendMonitor.Update(int(n)) c.sendMonitor.Update(int(_n))
c.flushTimer.Set() c.flushTimer.Set()
return false return false
} }
// recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer. // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive(). // After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled. // Blocks depending on how the connection is throttled.
// Otherwise, it never blocks. // Otherwise, it never blocks.
@ -447,28 +457,28 @@ func (c *MConnection) recvRoutine() {
FOR_LOOP: FOR_LOOP:
for { for {
// Block until .recvMonitor says we can read. // Block until .recvMonitor says we can read.
c.recvMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true) c.recvMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
// Peek into bufConnReader for debugging
/* /*
// Peek into bufReader for debugging if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
if numBytes := c.bufReader.Buffered(); numBytes > 0 { bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100))
log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte { if err == nil {
bytes, err := c.bufReader.Peek(cmn.MinInt(numBytes, 100)) // return
if err == nil { } else {
return bytes c.Logger.Debug("Error peeking connection buffer", "err", err)
} else { // return nil
log.Warn("Error peeking connection buffer", "err", err) }
return nil c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
}
}})
} }
*/ */
// Read packet type // Read packet type
var n int var packet Packet
var _n int64
var err error var err error
pktType := wire.ReadByte(c.bufReader, &n, &err) _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.maxPacketMsgTotalSize()))
c.recvMonitor.Update(int(n)) c.recvMonitor.Update(int(_n))
if err != nil { if err != nil {
if c.IsRunning() { if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
@ -478,8 +488,8 @@ FOR_LOOP:
} }
// Read more depending on packet type. // Read more depending on packet type.
switch pktType { switch pkt := packet.(type) {
case packetTypePing: case PacketPing:
// TODO: prevent abuse, as they cause flush()'s. // TODO: prevent abuse, as they cause flush()'s.
// https://github.com/tendermint/tendermint/issues/1190 // https://github.com/tendermint/tendermint/issues/1190
c.Logger.Debug("Receive Ping") c.Logger.Debug("Receive Ping")
@ -488,24 +498,14 @@ FOR_LOOP:
default: default:
// never block // never block
} }
case packetTypePong: case PacketPong:
c.Logger.Debug("Receive Pong") c.Logger.Debug("Receive Pong")
select { select {
case c.pongTimeoutCh <- false: case c.pongTimeoutCh <- false:
default: default:
// never block // never block
} }
case packetTypeMsg: case PacketMsg:
pkt, n, err := msgPacket{}, int(0), error(nil)
wire.ReadBinaryPtr(&pkt, c.bufReader, c.config.maxMsgPacketTotalSize(), &n, &err)
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
}
break FOR_LOOP
}
channel, ok := c.channelsIdx[pkt.ChannelID] channel, ok := c.channelsIdx[pkt.ChannelID]
if !ok || channel == nil { if !ok || channel == nil {
err := fmt.Errorf("Unknown channel %X", pkt.ChannelID) err := fmt.Errorf("Unknown channel %X", pkt.ChannelID)
@ -514,7 +514,7 @@ FOR_LOOP:
break FOR_LOOP break FOR_LOOP
} }
msgBytes, err := channel.recvMsgPacket(pkt) msgBytes, err := channel.recvPacketMsg(pkt)
if err != nil { if err != nil {
if c.IsRunning() { if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
@ -528,7 +528,7 @@ FOR_LOOP:
c.onReceive(pkt.ChannelID, msgBytes) c.onReceive(pkt.ChannelID, msgBytes)
} }
default: default:
err := fmt.Errorf("Unknown message type %X", pktType) err := fmt.Errorf("Unknown message type %v", reflect.TypeOf(packet))
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err) c.stopForError(err)
break FOR_LOOP break FOR_LOOP
@ -620,7 +620,7 @@ type Channel struct {
sending []byte sending []byte
recentlySent int64 // exponential moving average recentlySent int64 // exponential moving average
maxMsgPacketPayloadSize int maxPacketMsgPayloadSize int
Logger log.Logger Logger log.Logger
} }
@ -635,7 +635,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
desc: desc, desc: desc,
sendQueue: make(chan []byte, desc.SendQueueCapacity), sendQueue: make(chan []byte, desc.SendQueueCapacity),
recving: make([]byte, 0, desc.RecvBufferCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity),
maxMsgPacketPayloadSize: conn.config.MaxMsgPacketPayloadSize, maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
} }
} }
@ -680,8 +680,8 @@ func (ch *Channel) canSend() bool {
return ch.loadSendQueueSize() < defaultSendQueueCapacity return ch.loadSendQueueSize() < defaultSendQueueCapacity
} }
// Returns true if any msgPackets are pending to be sent. // Returns true if any PacketMsgs are pending to be sent.
// Call before calling nextMsgPacket() // Call before calling nextPacketMsg()
// Goroutine-safe // Goroutine-safe
func (ch *Channel) isSendPending() bool { func (ch *Channel) isSendPending() bool {
if len(ch.sending) == 0 { if len(ch.sending) == 0 {
@ -693,12 +693,12 @@ func (ch *Channel) isSendPending() bool {
return true return true
} }
// Creates a new msgPacket to send. // Creates a new PacketMsg to send.
// Not goroutine-safe // Not goroutine-safe
func (ch *Channel) nextMsgPacket() msgPacket { func (ch *Channel) nextPacketMsg() PacketMsg {
packet := msgPacket{} packet := PacketMsg{}
packet.ChannelID = byte(ch.desc.ID) packet.ChannelID = byte(ch.desc.ID)
maxSize := ch.maxMsgPacketPayloadSize maxSize := ch.maxPacketMsgPayloadSize
packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
if len(ch.sending) <= maxSize { if len(ch.sending) <= maxSize {
packet.EOF = byte(0x01) packet.EOF = byte(0x01)
@ -711,30 +711,23 @@ func (ch *Channel) nextMsgPacket() msgPacket {
return packet return packet
} }
// Writes next msgPacket to w. // Writes next PacketMsg to w and updates c.recentlySent.
// Not goroutine-safe // Not goroutine-safe
func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) { func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
packet := ch.nextMsgPacket() var packet = ch.nextPacketMsg()
ch.Logger.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet) n, err = cdc.MarshalBinaryWriter(w, packet)
writeMsgPacketTo(packet, w, &n, &err) ch.recentlySent += n
if err == nil {
ch.recentlySent += int64(n)
}
return return
} }
func writeMsgPacketTo(packet msgPacket, w io.Writer, n *int, err *error) { // Handles incoming PacketMsgs. It returns a message bytes if message is
wire.WriteByte(packetTypeMsg, w, n, err) // complete. NOTE message bytes may change on next call to recvPacketMsg.
wire.WriteBinary(packet, w, n, err)
}
// Handles incoming msgPackets. It returns a message bytes if message is
// complete. NOTE message bytes may change on next call to recvMsgPacket.
// Not goroutine-safe // Not goroutine-safe
func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) { func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
ch.Logger.Debug("Read Msg Packet", "conn", ch.conn, "packet", packet) ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
if ch.desc.RecvMessageCapacity < len(ch.recving)+len(packet.Bytes) { var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes)
return nil, wire.ErrBinaryReadOverflow if recvCap < recvReceived {
return nil, fmt.Errorf("Received message exceeds available capacity: %v < %v", recvCap, recvReceived)
} }
ch.recving = append(ch.recving, packet.Bytes...) ch.recving = append(ch.recving, packet.Bytes...)
if packet.EOF == byte(0x01) { if packet.EOF == byte(0x01) {
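
As a toy illustration of the chunking contract that nextPacketMsg and recvPacketMsg implement (plain Go, no amino; the helper names are made up): a message is cut into packets of at most the configured payload size, only the last one carries EOF, and the receiver appends chunks until EOF completes the message.

package main

import (
	"fmt"
)

type packetMsg struct {
	EOF   byte
	Bytes []byte
}

// split cuts msg into packets of at most maxSize bytes, flagging the last one EOF.
func split(msg []byte, maxSize int) []packetMsg {
	var pkts []packetMsg
	for len(msg) > maxSize {
		pkts = append(pkts, packetMsg{EOF: 0x00, Bytes: msg[:maxSize]})
		msg = msg[maxSize:]
	}
	return append(pkts, packetMsg{EOF: 0x01, Bytes: msg})
}

func main() {
	var recving []byte
	for _, pkt := range split([]byte("a fairly long message body"), 8) {
		// Receiver side: append chunks until EOF marks the message complete.
		recving = append(recving, pkt.Bytes...)
		if pkt.EOF == 0x01 {
			fmt.Printf("assembled %d bytes: %q\n", len(recving), recving)
			recving = recving[:0]
		}
	}
}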
@ -758,24 +751,36 @@ func (ch *Channel) updateStats() {
ch.recentlySent = int64(float64(ch.recentlySent) * 0.8) ch.recentlySent = int64(float64(ch.recentlySent) * 0.8)
} }
//----------------------------------------------------------------------------- //----------------------------------------
// Packet
const ( type Packet interface {
defaultMaxMsgPacketPayloadSize = 1024 AssertIsPacket()
}
maxMsgPacketOverheadSize = 10 // It's actually lower but good enough func RegisterPacket(cdc *amino.Codec) {
packetTypePing = byte(0x01) cdc.RegisterInterface((*Packet)(nil), nil)
packetTypePong = byte(0x02) cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil)
packetTypeMsg = byte(0x03) cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil)
) cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil)
}
// Messages in channels are chopped into smaller msgPackets for multiplexing. func (_ PacketPing) AssertIsPacket() {}
type msgPacket struct { func (_ PacketPong) AssertIsPacket() {}
func (_ PacketMsg) AssertIsPacket() {}
type PacketPing struct {
}
type PacketPong struct {
}
type PacketMsg struct {
ChannelID byte ChannelID byte
EOF byte // 1 means message ends here. EOF byte // 1 means message ends here.
Bytes []byte Bytes []byte
} }
func (p msgPacket) String() string { func (mp PacketMsg) String() string {
return fmt.Sprintf("MsgPacket{%X:%X T:%X}", p.ChannelID, p.Bytes, p.EOF) return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
} }
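
For illustration, a small self-contained sketch (not part of this commit) of how these Packet types travel over the wire once registered: MarshalBinaryWriter length-prefixes the packet as writePacketMsgTo does, UnmarshalBinaryReader decodes it back into the Packet interface as recvRoutine does, and the per-packet overhead stays within the 14-byte bound noted above.

package main

import (
	"bytes"
	"fmt"

	amino "github.com/tendermint/go-amino"
	conn "github.com/tendermint/tendermint/p2p/conn"
)

func main() {
	cdc := amino.NewCodec()
	conn.RegisterPacket(cdc) // same registration as p2p/conn/wire.go below

	// Encode a full-size PacketMsg the way writePacketMsgTo does.
	pkt := conn.PacketMsg{ChannelID: 0x01, EOF: 0x01, Bytes: make([]byte, 1024)}
	var buf bytes.Buffer
	n, err := cdc.MarshalBinaryWriter(&buf, pkt)
	if err != nil {
		panic(err)
	}
	fmt.Println("encoded:", n, "overhead:", int(n)-len(pkt.Bytes)) // overhead <= 14

	// Decode it back into the Packet interface the way recvRoutine does.
	var packet conn.Packet
	if _, err := cdc.UnmarshalBinaryReader(&buf, &packet, 2048); err != nil {
		panic(err)
	}
	if msg, ok := packet.(conn.PacketMsg); ok {
		fmt.Println("got PacketMsg on channel", msg.ChannelID)
	}
}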

View File

@ -1,13 +1,14 @@
package conn package conn
import ( import (
"bytes"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
wire "github.com/tendermint/go-wire" "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@ -32,41 +33,37 @@ func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msg
} }
func TestMConnectionSend(t *testing.T) { func TestMConnectionSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client) mconn := createTestMConnection(client)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
msg := "Ant-Man" msg := []byte("Ant-Man")
assert.True(mconn.Send(0x01, msg)) assert.True(t, mconn.Send(0x01, msg))
// Note: subsequent Send/TrySend calls could pass because we are reading from // Note: subsequent Send/TrySend calls could pass because we are reading from
// the send queue in a separate goroutine. // the send queue in a separate goroutine.
_, err = server.Read(make([]byte, len(msg))) _, err = server.Read(make([]byte, len(msg)))
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
assert.True(mconn.CanSend(0x01)) assert.True(t, mconn.CanSend(0x01))
msg = "Spider-Man" msg = []byte("Spider-Man")
assert.True(mconn.TrySend(0x01, msg)) assert.True(t, mconn.TrySend(0x01, msg))
_, err = server.Read(make([]byte, len(msg))) _, err = server.Read(make([]byte, len(msg)))
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown") assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown") assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown")
} }
func TestMConnectionReceive(t *testing.T) { func TestMConnectionReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
@ -81,20 +78,20 @@ func TestMConnectionReceive(t *testing.T) {
} }
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
err := mconn1.Start() err := mconn1.Start()
require.Nil(err) require.Nil(t, err)
defer mconn1.Stop() defer mconn1.Stop()
mconn2 := createTestMConnection(server) mconn2 := createTestMConnection(server)
err = mconn2.Start() err = mconn2.Start()
require.Nil(err) require.Nil(t, err)
defer mconn2.Stop() defer mconn2.Stop()
msg := "Cyclops" msg := []byte("Cyclops")
assert.True(mconn2.Send(0x01, msg)) assert.True(t, mconn2.Send(0x01, msg))
select { select {
case receivedBytes := <-receivedCh: case receivedBytes := <-receivedCh:
assert.Equal([]byte(msg), receivedBytes[2:]) // first 3 bytes are internal assert.Equal(t, []byte(msg), receivedBytes)
case err := <-errorsCh: case err := <-errorsCh:
t.Fatalf("Expected %s, got %+v", msg, err) t.Fatalf("Expected %s, got %+v", msg, err)
case <-time.After(500 * time.Millisecond): case <-time.After(500 * time.Millisecond):
@ -103,20 +100,18 @@ func TestMConnectionReceive(t *testing.T) {
} }
func TestMConnectionStatus(t *testing.T) { func TestMConnectionStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client) mconn := createTestMConnection(client)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
status := mconn.Status() status := mconn.Status()
assert.NotNil(status) assert.NotNil(t, status)
assert.Zero(status.Channels[0].SendQueueSize) assert.Zero(t, status.Channels[0].SendQueueSize)
} }
func TestMConnectionPongTimeoutResultsInError(t *testing.T) { func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
@ -140,7 +135,10 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
serverGotPing := make(chan struct{}) serverGotPing := make(chan struct{})
go func() { go func() {
// read ping // read ping
server.Read(make([]byte, 1)) var pkt PacketPing
const maxPacketPingSize = 1024
_, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPacketPingSize)
assert.Nil(t, err)
serverGotPing <- struct{}{} serverGotPing <- struct{}{}
}() }()
<-serverGotPing <-serverGotPing
@ -175,21 +173,22 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
defer mconn.Stop() defer mconn.Stop()
// sending 3 pongs in a row (abuse) // sending 3 pongs in a row (abuse)
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
serverGotPing := make(chan struct{}) serverGotPing := make(chan struct{})
go func() { go func() {
// read ping (one byte) // read ping
_, err = server.Read(make([]byte, 1)) var packet, err = Packet(nil), error(nil)
_, err = cdc.UnmarshalBinaryReader(server, &packet, 1024)
require.Nil(t, err) require.Nil(t, err)
serverGotPing <- struct{}{} serverGotPing <- struct{}{}
// respond with pong // respond with pong
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
}() }()
<-serverGotPing <-serverGotPing
@ -225,17 +224,18 @@ func TestMConnectionMultiplePings(t *testing.T) {
// sending 3 pings in a row (abuse) // sending 3 pings in a row (abuse)
// see https://github.com/tendermint/tendermint/issues/1190 // see https://github.com/tendermint/tendermint/issues/1190
_, err = server.Write([]byte{packetTypePing}) _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Read(make([]byte, 1)) var pkt PacketPong
_, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePing}) _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Read(make([]byte, 1)) _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePing}) _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Read(make([]byte, 1)) _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err) require.Nil(t, err)
assert.True(t, mconn.IsRunning()) assert.True(t, mconn.IsRunning())
@ -262,18 +262,21 @@ func TestMConnectionPingPongs(t *testing.T) {
serverGotPing := make(chan struct{}) serverGotPing := make(chan struct{})
go func() { go func() {
// read ping // read ping
server.Read(make([]byte, 1)) var pkt PacketPing
_, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
serverGotPing <- struct{}{} serverGotPing <- struct{}{}
// respond with pong // respond with pong
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
time.Sleep(mconn.config.PingInterval) time.Sleep(mconn.config.PingInterval)
// read ping // read ping
server.Read(make([]byte, 1)) _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
// respond with pong // respond with pong
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
}() }()
<-serverGotPing <-serverGotPing
@ -290,8 +293,6 @@ func TestMConnectionPingPongs(t *testing.T) {
} }
func TestMConnectionStopsAndReturnsError(t *testing.T) { func TestMConnectionStopsAndReturnsError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
@ -306,7 +307,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
} }
mconn := createMConnectionWithCallbacks(client, onReceive, onError) mconn := createMConnectionWithCallbacks(client, onReceive, onError)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
if err := client.Close(); err != nil { if err := client.Close(); err != nil {
@ -317,14 +318,14 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
case receivedBytes := <-receivedCh: case receivedBytes := <-receivedCh:
t.Fatalf("Expected error, got %v", receivedBytes) t.Fatalf("Expected error, got %v", receivedBytes)
case err := <-errorsCh: case err := <-errorsCh:
assert.NotNil(err) assert.NotNil(t, err)
assert.False(mconn.IsRunning()) assert.False(t, mconn.IsRunning())
case <-time.After(500 * time.Millisecond): case <-time.After(500 * time.Millisecond):
t.Fatal("Did not receive error in 500ms") t.Fatal("Did not receive error in 500ms")
} }
} }
func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr chan struct{}) (*MConnection, *MConnection) { func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) {
server, client := NetPipe() server, client := NetPipe()
onReceive := func(chID byte, msgBytes []byte) {} onReceive := func(chID byte, msgBytes []byte) {}
@ -338,7 +339,7 @@ func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr c
mconnClient := NewMConnection(client, chDescs, onReceive, onError) mconnClient := NewMConnection(client, chDescs, onReceive, onError)
mconnClient.SetLogger(log.TestingLogger().With("module", "client")) mconnClient.SetLogger(log.TestingLogger().With("module", "client"))
err := mconnClient.Start() err := mconnClient.Start()
require.Nil(err) require.Nil(t, err)
// create server conn with 1 channel // create server conn with 1 channel
// it fires on chOnErr when there's an error // it fires on chOnErr when there's an error
@ -349,7 +350,7 @@ func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr c
mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) mconnServer := createMConnectionWithCallbacks(server, onReceive, onError)
mconnServer.SetLogger(serverLogger) mconnServer.SetLogger(serverLogger)
err = mconnServer.Start() err = mconnServer.Start()
require.Nil(err) require.Nil(t, err)
return mconnClient, mconnServer return mconnClient, mconnServer
} }
@ -364,50 +365,45 @@ func expectSend(ch chan struct{}) bool {
} }
func TestMConnectionReadErrorBadEncoding(t *testing.T) { func TestMConnectionReadErrorBadEncoding(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
client := mconnClient.conn client := mconnClient.conn
msg := "Ant-Man"
// send badly encoded msgPacket // send badly encoded msgPacket
var n int bz := cdc.MustMarshalBinary(PacketMsg{})
var err error bz[4] += 0x01 // Invalid prefix bytes.
wire.WriteByte(packetTypeMsg, client, &n, &err)
wire.WriteByteSlice([]byte(msg), client, &n, &err) // Write it.
assert.True(expectSend(chOnErr), "badly encoded msgPacket") _, err := client.Write(bz)
assert.Nil(t, err)
assert.True(t, expectSend(chOnErr), "badly encoded msgPacket")
} }
func TestMConnectionReadErrorUnknownChannel(t *testing.T) { func TestMConnectionReadErrorUnknownChannel(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
msg := "Ant-Man" msg := []byte("Ant-Man")
// fail to send msg on channel unknown by client // fail to send msg on channel unknown by client
assert.False(mconnClient.Send(0x03, msg)) assert.False(t, mconnClient.Send(0x03, msg))
// send msg on channel unknown by the server. // send msg on channel unknown by the server.
// should cause an error // should cause an error
assert.True(mconnClient.Send(0x02, msg)) assert.True(t, mconnClient.Send(0x02, msg))
assert.True(expectSend(chOnErr), "unknown channel") assert.True(t, expectSend(chOnErr), "unknown channel")
} }
func TestMConnectionReadErrorLongMessage(t *testing.T) { func TestMConnectionReadErrorLongMessage(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
chOnRcv := make(chan struct{}) chOnRcv := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
@ -418,65 +414,81 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
client := mconnClient.conn client := mconnClient.conn
// send msg that's just right // send msg that's just right
var n int
var err error var err error
packet := msgPacket{ var buf = new(bytes.Buffer)
// - Uvarint length of MustMarshalBinary(packet) = 1 or 2 bytes
// (as long as it's less than 16,384 bytes)
// - Prefix bytes = 4 bytes
// - ChannelID field key + byte = 2 bytes
// - EOF field key + byte = 2 bytes
// - Bytes field key = 1 bytes
// - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes
// - Struct terminator = 1 byte
// = up to 14 bytes overhead for the packet.
var packet = PacketMsg{
ChannelID: 0x01, ChannelID: 0x01,
Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-5),
EOF: 1, EOF: 1,
Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize),
} }
writeMsgPacketTo(packet, client, &n, &err) _, err = cdc.MarshalBinaryWriter(buf, packet)
assert.True(expectSend(chOnRcv), "msg just right") assert.Nil(t, err)
_, err = client.Write(buf.Bytes())
assert.Nil(t, err)
assert.True(t, expectSend(chOnRcv), "msg just right")
assert.False(t, expectSend(chOnErr), "msg just right")
// send msg that's too long // send msg that's too long
packet = msgPacket{ buf = new(bytes.Buffer)
packet = PacketMsg{
ChannelID: 0x01, ChannelID: 0x01,
Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-4),
EOF: 1, EOF: 1,
Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+1),
} }
writeMsgPacketTo(packet, client, &n, &err) _, err = cdc.MarshalBinaryWriter(buf, packet)
assert.True(expectSend(chOnErr), "msg too long") assert.Nil(t, err)
_, err = client.Write(buf.Bytes())
assert.NotNil(t, err)
assert.False(t, expectSend(chOnRcv), "msg too long")
assert.True(t, expectSend(chOnErr), "msg too long")
} }
func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
// send msg with unknown msg type // send msg with unknown msg type
var n int err := error(nil)
var err error err = amino.EncodeUvarint(mconnClient.conn, 4)
wire.WriteByte(0x04, mconnClient.conn, &n, &err) assert.Nil(t, err)
assert.True(expectSend(chOnErr), "unknown msg type") _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF})
assert.Nil(t, err)
assert.True(t, expectSend(chOnErr), "unknown msg type")
} }
func TestMConnectionTrySend(t *testing.T) { func TestMConnectionTrySend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() defer server.Close()
defer client.Close() defer client.Close()
mconn := createTestMConnection(client) mconn := createTestMConnection(client)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
msg := "Semicolon-Woman" msg := []byte("Semicolon-Woman")
resultCh := make(chan string, 2) resultCh := make(chan string, 2)
assert.True(mconn.TrySend(0x01, msg)) assert.True(t, mconn.TrySend(0x01, msg))
server.Read(make([]byte, len(msg))) server.Read(make([]byte, len(msg)))
assert.True(mconn.CanSend(0x01)) assert.True(t, mconn.CanSend(0x01))
assert.True(mconn.TrySend(0x01, msg)) assert.True(t, mconn.TrySend(0x01, msg))
assert.False(mconn.CanSend(0x01)) assert.False(t, mconn.CanSend(0x01))
go func() { go func() {
mconn.TrySend(0x01, msg) mconn.TrySend(0x01, msg)
resultCh <- "TrySend" resultCh <- "TrySend"
}() }()
assert.False(mconn.CanSend(0x01)) assert.False(t, mconn.CanSend(0x01))
assert.False(mconn.TrySend(0x01, msg)) assert.False(t, mconn.TrySend(0x01, msg))
assert.Equal("TrySend", <-resultCh) assert.Equal(t, "TrySend", <-resultCh)
} }

View File

@ -20,17 +20,15 @@ import (
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160" "golang.org/x/crypto/ripemd160"
crypto "github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
// 2 + 1024 == 1026 total frame size // 4 + 1024 == 1028 total frame size
const dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize const dataLenSize = 4
const dataMaxSize = 1024 const dataMaxSize = 1024
const totalFrameSize = dataMaxSize + dataLenSize const totalFrameSize = dataMaxSize + dataLenSize
const sealedFrameSize = totalFrameSize + secretbox.Overhead const sealedFrameSize = totalFrameSize + secretbox.Overhead
const authSigMsgSize = (32 + 1) + (64 + 1) // fixed size (length prefixed) byte arrays
// Implements net.Conn // Implements net.Conn
type SecretConnection struct { type SecretConnection struct {
@ -123,7 +121,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
data = nil data = nil
} }
chunkLength := len(chunk) chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength)) binary.BigEndian.PutUint32(frame, uint32(chunkLength))
copy(frame[dataLenSize:], chunk) copy(frame[dataLenSize:], chunk)
// encrypt the frame // encrypt the frame
@ -145,8 +143,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
// CONTRACT: data smaller than dataMaxSize is read atomically. // CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) { func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) { if 0 < len(sc.recvBuffer) {
n_ := copy(data, sc.recvBuffer) n = copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[n_:] sc.recvBuffer = sc.recvBuffer[n:]
return return
} }
@ -166,7 +164,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) {
incr2Nonce(sc.recvNonce) incr2Nonce(sc.recvNonce)
// end decryption // end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes var chunkLength = binary.BigEndian.Uint32(frame) // read the first four bytes
if chunkLength > dataMaxSize { if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize") return 0, errors.New("chunkLength is greater than dataMaxSize")
} }
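
A minimal sketch of the new frame layout only (illustrative, not the package code; the real Write additionally seals each frame with NaCl secretbox, adding secretbox.Overhead == 16 bytes to reach sealedFrameSize): the length prefix grows from a 2-byte uint16 to a 4-byte uint32, so every plaintext frame is now 1028 bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	dataLenSize    = 4
	dataMaxSize    = 1024
	totalFrameSize = dataMaxSize + dataLenSize // 1028
)

// frameChunk builds one plaintext frame: 4-byte big-endian length, then the
// chunk, zero-padded out to totalFrameSize.
func frameChunk(chunk []byte) []byte {
	if len(chunk) > dataMaxSize {
		chunk = chunk[:dataMaxSize]
	}
	frame := make([]byte, totalFrameSize)
	binary.BigEndian.PutUint32(frame, uint32(len(chunk)))
	copy(frame[dataLenSize:], chunk)
	return frame
}

func main() {
	frame := frameChunk([]byte("hello"))
	fmt.Println(len(frame), binary.BigEndian.Uint32(frame)) // 1028 5
}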
@ -193,16 +191,17 @@ func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader) ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil { if err != nil {
cmn.PanicCrisis("Could not generate ephemeral keypairs") panic("Could not generate ephemeral keypairs")
} }
return return
} }
func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) { func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
// Send our pubkey and receive theirs in tandem. // Send our pubkey and receive theirs in tandem.
var trs, _ = cmn.Parallel( var trs, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
var _, err1 = conn.Write(locEphPub[:]) var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub)
if err1 != nil { if err1 != nil {
return nil, err1, true // abort return nil, err1, true // abort
} else { } else {
@ -211,7 +210,7 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3
}, },
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
var _remEphPub [32]byte var _remEphPub [32]byte
var _, err2 = io.ReadFull(conn, _remEphPub[:]) var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO
if err2 != nil { if err2 != nil {
return nil, err2, true // abort return nil, err2, true // abort
} else { } else {
@ -277,12 +276,12 @@ type authSigMessage struct {
Sig crypto.Signature Sig crypto.Signature
} }
func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg *authSigMessage, err error) { func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg authSigMessage, err error) {
// Send our info and receive theirs in tandem. // Send our info and receive theirs in tandem.
var trs, _ = cmn.Parallel( var trs, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
msgBytes := wire.BinaryBytes(authSigMessage{pubKey.Wrap(), signature.Wrap()}) var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature})
var _, err1 = sc.Write(msgBytes)
if err1 != nil { if err1 != nil {
return nil, err1, true // abort return nil, err1, true // abort
} else { } else {
@ -290,13 +289,8 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature cr
} }
}, },
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
readBuffer := make([]byte, authSigMsgSize) var _recvMsg authSigMessage
var _, err2 = io.ReadFull(sc, readBuffer) var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO
if err2 != nil {
return nil, err2, true // abort
}
n := int(0) // not used.
var _recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
if err2 != nil { if err2 != nil {
return nil, err2, true // abort return nil, err2, true // abort
} else { } else {
@ -312,7 +306,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature cr
} }
var _recvMsg = trs.FirstValue().(authSigMessage) var _recvMsg = trs.FirstValue().(authSigMessage)
return &_recvMsg, nil return _recvMsg, nil
} }
//-------------------------------------------------------------------------------- //--------------------------------------------------------------------------------

View File

@ -33,12 +33,14 @@ func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) {
} }
func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
fooConn, barConn := makeKVStoreConnPair()
fooPrvKey := crypto.GenPrivKeyEd25519().Wrap()
fooPubKey := fooPrvKey.PubKey()
barPrvKey := crypto.GenPrivKeyEd25519().Wrap()
barPubKey := barPrvKey.PubKey()
var fooConn, barConn = makeKVStoreConnPair()
var fooPrvKey = crypto.GenPrivKeyEd25519()
var fooPubKey = fooPrvKey.PubKey()
var barPrvKey = crypto.GenPrivKeyEd25519()
var barPubKey = barPrvKey.PubKey()
// Make connections from both sides in parallel.
var trs, ok = cmn.Parallel( var trs, ok = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
@ -100,10 +102,10 @@ func TestSecretConnectionReadWrite(t *testing.T) {
} }
// A helper that will run with (fooConn, fooWrites, fooReads) and vice versa // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
genNodeRunner := func(nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task { genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task {
return func(_ int) (interface{}, error, bool) { return func(_ int) (interface{}, error, bool) {
// Node handskae // Initiate cryptographic private key and secret connection through nodeConn.
nodePrvKey := crypto.GenPrivKeyEd25519().Wrap() nodePrvKey := crypto.GenPrivKeyEd25519()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil { if err != nil {
t.Errorf("Failed to establish SecretConnection for node: %v", err) t.Errorf("Failed to establish SecretConnection for node: %v", err)
@ -112,7 +114,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
// In parallel, handle some reads and writes. // In parallel, handle some reads and writes.
var trs, ok = cmn.Parallel( var trs, ok = cmn.Parallel(
func(_ int) (interface{}, error, bool) { func(_ int) (interface{}, error, bool) {
// Node writes // Node writes:
for _, nodeWrite := range nodeWrites { for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite)) n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil { if err != nil {
@ -132,7 +134,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
return nil, nil, false return nil, nil, false
}, },
func(_ int) (interface{}, error, bool) { func(_ int) (interface{}, error, bool) {
// Node reads // Node reads:
readBuffer := make([]byte, dataMaxSize) readBuffer := make([]byte, dataMaxSize)
for { for {
n, err := nodeSecretConn.Read(readBuffer) n, err := nodeSecretConn.Read(readBuffer)
@ -165,8 +167,8 @@ func TestSecretConnectionReadWrite(t *testing.T) {
// Run foo & bar in parallel // Run foo & bar in parallel
var trs, ok = cmn.Parallel( var trs, ok = cmn.Parallel(
genNodeRunner(fooConn, fooWrites, &fooReads), genNodeRunner("foo", fooConn, fooWrites, &fooReads),
genNodeRunner(barConn, barWrites, &barReads), genNodeRunner("bar", barConn, barWrites, &barReads),
) )
require.Nil(t, trs.FirstError()) require.Nil(t, trs.FirstError())
require.True(t, ok, "unexpected task abortion") require.True(t, ok, "unexpected task abortion")
@ -237,3 +239,12 @@ func BenchmarkSecretConnection(b *testing.B) {
} }
//barSecConn.Close() race condition //barSecConn.Close() race condition
} }
func fingerprint(bz []byte) []byte {
const fbsize = 40
if len(bz) < fbsize {
return bz
} else {
return bz[:fbsize]
}
}
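The read/write test above drives two connections concurrently: one goroutine writes while the other reads until the connection closes. A rough, self-contained Go sketch of that shape, using net.Pipe in place of the kvstore connection pair and omitting the secret-connection wrapping entirely:

package main

import (
	"fmt"
	"net"
	"sync"
)

func main() {
	// net.Pipe stands in for the in-memory connection pair used by the test.
	fooConn, barConn := net.Pipe()
	var wg sync.WaitGroup
	wg.Add(2)

	go func() { // "foo" writes, then closes its end
		defer wg.Done()
		if _, err := fooConn.Write([]byte("hello from foo")); err != nil {
			fmt.Println("write error:", err)
		}
		fooConn.Close()
	}()

	go func() { // "bar" reads until EOF
		defer wg.Done()
		buf := make([]byte, 1024)
		for {
			n, err := barConn.Read(buf)
			if n > 0 {
				fmt.Printf("bar read: %q\n", buf[:n])
			}
			if err != nil {
				return
			}
		}
	}()

	wg.Wait()
}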

13
p2p/conn/wire.go Normal file
View File

@ -0,0 +1,13 @@
package conn
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc *amino.Codec = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
RegisterPacket(cdc)
}

View File

@ -48,12 +48,12 @@ func (p *peer) Status() tmconn.ConnectionStatus {
} }
// Send does not do anything and just returns true. // Send does not do anything and just returns true.
func (p *peer) Send(byte, interface{}) bool { func (p *peer) Send(byte, []byte) bool {
return true return true
} }
// TrySend does not do anything and just returns true. // TrySend does not do anything and just returns true.
func (p *peer) TrySend(byte, interface{}) bool { func (p *peer) TrySend(byte, []byte) bool {
return true return true
} }

View File

@ -1,10 +1,11 @@
package p2p package p2p
import ( import (
"math/rand"
"net" "net"
"sync" "sync"
"time" "time"
cmn "github.com/tendermint/tmlibs/common"
) )
const ( const (
@ -124,7 +125,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
func (fc *FuzzedConnection) randomDuration() time.Duration { func (fc *FuzzedConnection) randomDuration() time.Duration {
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000) maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas return time.Millisecond * time.Duration(cmn.RandInt()%maxDelayMillis) // nolint: gas
} }
// implements the fuzz (delay, kill conn) // implements the fuzz (delay, kill conn)
@ -137,7 +138,7 @@ func (fc *FuzzedConnection) fuzz() bool {
switch fc.config.Mode { switch fc.config.Mode {
case FuzzModeDrop: case FuzzModeDrop:
// randomly drop the r/w, drop the conn, or sleep // randomly drop the r/w, drop the conn, or sleep
r := rand.Float64() r := cmn.RandFloat64()
if r <= fc.config.ProbDropRW { if r <= fc.config.ProbDropRW {
return true return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn { } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {

View File

@ -3,7 +3,6 @@ package p2p
import ( import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -48,7 +47,7 @@ func PubKeyToID(pubKey crypto.PubKey) ID {
// If the file does not exist, it generates and saves a new NodeKey. // If the file does not exist, it generates and saves a new NodeKey.
func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
if cmn.FileExists(filePath) { if cmn.FileExists(filePath) {
nodeKey, err := loadNodeKey(filePath) nodeKey, err := LoadNodeKey(filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -57,13 +56,13 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
return genNodeKey(filePath) return genNodeKey(filePath)
} }
func loadNodeKey(filePath string) (*NodeKey, error) { func LoadNodeKey(filePath string) (*NodeKey, error) {
jsonBytes, err := ioutil.ReadFile(filePath) jsonBytes, err := ioutil.ReadFile(filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodeKey := new(NodeKey) nodeKey := new(NodeKey)
err = json.Unmarshal(jsonBytes, nodeKey) err = cdc.UnmarshalJSON(jsonBytes, nodeKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err) return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err)
} }
@ -71,12 +70,12 @@ func loadNodeKey(filePath string) (*NodeKey, error) {
} }
func genNodeKey(filePath string) (*NodeKey, error) { func genNodeKey(filePath string) (*NodeKey, error) {
privKey := crypto.GenPrivKeyEd25519().Wrap() privKey := crypto.GenPrivKeyEd25519()
nodeKey := &NodeKey{ nodeKey := &NodeKey{
PrivKey: privKey, PrivKey: privKey,
} }
jsonBytes, err := json.Marshal(nodeKey) jsonBytes, err := cdc.MarshalJSON(nodeKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
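The node key file is now written and read as amino JSON via the package codec instead of encoding/json. A rough, hedged sketch of the load-or-generate flow implied by LoadOrGenNodeKey above, with a made-up nodeKeyFile type standing in for NodeKey (the real type wraps a go-crypto private key) and os.Stat standing in for cmn.FileExists:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/tendermint/go-amino"
)

// nodeKeyFile is illustrative only; it is not the real p2p.NodeKey type.
type nodeKeyFile struct {
	PrivKey []byte `json:"priv_key"`
}

var cdc = amino.NewCodec()

// loadOrGen mirrors the LoadOrGenNodeKey shape: load the amino-JSON file if
// it exists, otherwise generate a key, persist it, and return it.
func loadOrGen(path string) (*nodeKeyFile, error) {
	if _, err := os.Stat(path); err == nil {
		bz, err := ioutil.ReadFile(path)
		if err != nil {
			return nil, err
		}
		nk := new(nodeKeyFile)
		if err := cdc.UnmarshalJSON(bz, nk); err != nil {
			return nil, fmt.Errorf("error reading node key from %v: %v", path, err)
		}
		return nk, nil
	}
	nk := &nodeKeyFile{PrivKey: []byte("not-a-real-key")} // the real code generates an Ed25519 key
	bz, err := cdc.MarshalJSON(nk)
	if err != nil {
		return nil, err
	}
	if err := ioutil.WriteFile(path, bz, 0600); err != nil {
		return nil, err
	}
	return nk, nil
}

func main() {
	nk, err := loadOrGen("node_key.json")
	fmt.Println(nk, err)
}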

View File

@ -13,7 +13,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
@ -77,7 +76,7 @@ func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
idStr := spl[0] idStr := spl[0]
idBytes, err := hex.DecodeString(idStr) idBytes, err := hex.DecodeString(idStr)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "Address (%s) contains invalid ID", addrWithoutProtocol) return nil, cmn.ErrorWrap(err, fmt.Sprintf("Address (%s) contains invalid ID", addrWithoutProtocol))
} }
if len(idBytes) != IDByteLength { if len(idBytes) != IDByteLength {
return nil, fmt.Errorf("Address (%s) contains ID of invalid length (%d). Should be %d hex-encoded bytes", return nil, fmt.Errorf("Address (%s) contains ID of invalid length (%d). Should be %d hex-encoded bytes",

View File

@ -3,8 +3,6 @@ package p2p
import ( import (
"fmt" "fmt"
"strings" "strings"
crypto "github.com/tendermint/go-crypto"
) )
const ( const (
@ -20,8 +18,8 @@ func MaxNodeInfoSize() int {
// between two peers during the Tendermint P2P handshake. // between two peers during the Tendermint P2P handshake.
type NodeInfo struct { type NodeInfo struct {
// Authenticate // Authenticate
PubKey crypto.PubKey `json:"pub_key"` // authenticated pubkey ID ID `json:"id"` // authenticated identifier
ListenAddr string `json:"listen_addr"` // accepting incoming ListenAddr string `json:"listen_addr"` // accepting incoming
// Check compatibility // Check compatibility
Network string `json:"network"` // network/chain ID Network string `json:"network"` // network/chain ID
@ -107,19 +105,12 @@ OUTER_LOOP:
return nil return nil
} }
// ID returns node's ID.
func (info NodeInfo) ID() ID {
return PubKeyToID(info.PubKey)
}
// NetAddress returns a NetAddress derived from the NodeInfo - // NetAddress returns a NetAddress derived from the NodeInfo -
// it includes the authenticated peer ID and the self-reported // it includes the authenticated peer ID and the self-reported
// ListenAddr. Note that the ListenAddr is not authenticated and // ListenAddr. Note that the ListenAddr is not authenticated and
// may not match the address actually dialed if it's an outbound peer. // may not match the address actually dialed if it's an outbound peer.
func (info NodeInfo) NetAddress() *NetAddress { func (info NodeInfo) NetAddress() *NetAddress {
id := PubKeyToID(info.PubKey) netAddr, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr))
addr := info.ListenAddr
netAddr, err := NewNetAddressString(IDAddressString(id, addr))
if err != nil { if err != nil {
panic(err) // everything should be well formed by now panic(err) // everything should be well formed by now
} }
@ -127,7 +118,8 @@ func (info NodeInfo) NetAddress() *NetAddress {
} }
func (info NodeInfo) String() string { func (info NodeInfo) String() string {
return fmt.Sprintf("NodeInfo{pk: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", info.PubKey, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other) return fmt.Sprintf("NodeInfo{id: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}",
info.ID, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other)
} }
func splitVersion(version string) (string, string, string, error) { func splitVersion(version string) (string, string, string, error) {
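With the change above, NodeInfo carries the authenticated ID directly instead of the full PubKey, and NetAddress is rebuilt from IDAddressString(info.ID, info.ListenAddr). A small, self-contained sketch of the resulting "id@host:port" address form; idAddressString below is a hypothetical stand-in, not the real p2p helper:

package main

import "fmt"

// idAddressString prepends the hex-encoded node ID to the self-reported
// listen address, matching the address form used throughout this diff.
func idAddressString(id, listenAddr string) string {
	return fmt.Sprintf("%s@%s", id, listenAddr)
}

func main() {
	// Example values only; a real ID is derived from the node's public key.
	fmt.Println(idAddressString("c0ffee254729296a45a3885639ac7e10f9d54979", "10.0.0.1:46656"))
}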

View File

@ -5,10 +5,7 @@ import (
"net" "net"
"time" "time"
"github.com/pkg/errors" "github.com/tendermint/go-crypto"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@ -25,8 +22,8 @@ type Peer interface {
NodeInfo() NodeInfo // peer's info NodeInfo() NodeInfo // peer's info
Status() tmconn.ConnectionStatus Status() tmconn.ConnectionStatus
Send(byte, interface{}) bool Send(byte, []byte) bool
TrySend(byte, interface{}) bool TrySend(byte, []byte) bool
Set(string, interface{}) Set(string, interface{})
Get(string) interface{} Get(string) interface{}
@ -114,13 +111,13 @@ func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool,
conn, err := dial(addr, config) conn, err := dial(addr, config)
if err != nil { if err != nil {
return pc, errors.Wrap(err, "Error creating peer") return pc, cmn.ErrorWrap(err, "Error creating peer")
} }
pc, err = newPeerConn(conn, config, true, persistent, ourNodePrivKey) pc, err = newPeerConn(conn, config, true, persistent, ourNodePrivKey)
if err != nil { if err != nil {
if err2 := conn.Close(); err2 != nil { if err2 := conn.Close(); err2 != nil {
return pc, errors.Wrap(err, err2.Error()) return pc, cmn.ErrorWrap(err, err2.Error())
} }
return pc, err return pc, err
} }
@ -128,7 +125,7 @@ func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool,
// ensure dialed ID matches connection ID // ensure dialed ID matches connection ID
if config.AuthEnc && addr.ID != pc.ID() { if config.AuthEnc && addr.ID != pc.ID() {
if err2 := conn.Close(); err2 != nil { if err2 := conn.Close(); err2 != nil {
return pc, errors.Wrap(err, err2.Error()) return pc, cmn.ErrorWrap(err, err2.Error())
} }
return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()} return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()}
} }
@ -157,13 +154,13 @@ func newPeerConn(rawConn net.Conn,
if config.AuthEnc { if config.AuthEnc {
// Set deadline for secret handshake // Set deadline for secret handshake
if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil { if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil {
return pc, errors.Wrap(err, "Error setting deadline while encrypting connection") return pc, cmn.ErrorWrap(err, "Error setting deadline while encrypting connection")
} }
// Encrypt connection // Encrypt connection
conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey) conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey)
if err != nil { if err != nil {
return pc, errors.Wrap(err, "Error creating peer") return pc, cmn.ErrorWrap(err, "Error creating peer")
} }
} }
@ -205,7 +202,7 @@ func (p *peer) OnStop() {
// ID returns the peer's ID - the hex encoded hash of its pubkey. // ID returns the peer's ID - the hex encoded hash of its pubkey.
func (p *peer) ID() ID { func (p *peer) ID() ID {
return p.nodeInfo.ID() return p.nodeInfo.ID
} }
// IsOutbound returns true if the connection is outbound, false otherwise. // IsOutbound returns true if the connection is outbound, false otherwise.
@ -228,9 +225,9 @@ func (p *peer) Status() tmconn.ConnectionStatus {
return p.mconn.Status() return p.mconn.Status()
} }
// Send msg to the channel identified by chID byte. Returns false if the send // Send msg bytes to the channel identified by chID byte. Returns false if the
// queue is full after timeout, specified by MConnection. // send queue is full after timeout, specified by MConnection.
func (p *peer) Send(chID byte, msg interface{}) bool { func (p *peer) Send(chID byte, msgBytes []byte) bool {
if !p.IsRunning() { if !p.IsRunning() {
// see Switch#Broadcast, where we fetch the list of peers and loop over // see Switch#Broadcast, where we fetch the list of peers and loop over
// them - while we're looping, one peer may be removed and stopped. // them - while we're looping, one peer may be removed and stopped.
@ -238,18 +235,18 @@ func (p *peer) Send(chID byte, msg interface{}) bool {
} else if !p.hasChannel(chID) { } else if !p.hasChannel(chID) {
return false return false
} }
return p.mconn.Send(chID, msg) return p.mconn.Send(chID, msgBytes)
} }
// TrySend msg to the channel identified by chID byte. Immediately returns // TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full. // false if the send queue is full.
func (p *peer) TrySend(chID byte, msg interface{}) bool { func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
if !p.IsRunning() { if !p.IsRunning() {
return false return false
} else if !p.hasChannel(chID) { } else if !p.hasChannel(chID) {
return false return false
} }
return p.mconn.TrySend(chID, msg) return p.mconn.TrySend(chID, msgBytes)
} }
// Get the data for a given key. // Get the data for a given key.
@ -290,28 +287,26 @@ func (pc *peerConn) CloseConn() {
func (pc *peerConn) HandshakeTimeout(ourNodeInfo NodeInfo, timeout time.Duration) (peerNodeInfo NodeInfo, err error) { func (pc *peerConn) HandshakeTimeout(ourNodeInfo NodeInfo, timeout time.Duration) (peerNodeInfo NodeInfo, err error) {
// Set deadline for handshake so we don't block forever on conn.ReadFull // Set deadline for handshake so we don't block forever on conn.ReadFull
if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil { if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
return peerNodeInfo, errors.Wrap(err, "Error setting deadline") return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline")
} }
var trs, _ = cmn.Parallel( var trs, _ = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
var n int _, err = cdc.MarshalBinaryWriter(pc.conn, ourNodeInfo)
wire.WriteBinary(&ourNodeInfo, pc.conn, &n, &err)
return return
}, },
func(_ int) (val interface{}, err error, abort bool) { func(_ int) (val interface{}, err error, abort bool) {
var n int _, err = cdc.UnmarshalBinaryReader(pc.conn, &peerNodeInfo, int64(MaxNodeInfoSize()))
wire.ReadBinary(&peerNodeInfo, pc.conn, MaxNodeInfoSize(), &n, &err)
return return
}, },
) )
if err := trs.FirstError(); err != nil { if err := trs.FirstError(); err != nil {
return peerNodeInfo, errors.Wrap(err, "Error during handshake") return peerNodeInfo, cmn.ErrorWrap(err, "Error during handshake")
} }
// Remove deadline // Remove deadline
if err := pc.conn.SetDeadline(time.Time{}); err != nil { if err := pc.conn.SetDeadline(time.Time{}); err != nil {
return peerNodeInfo, errors.Wrap(err, "Error removing deadline") return peerNodeInfo, cmn.ErrorWrap(err, "Error removing deadline")
} }
return peerNodeInfo, nil return peerNodeInfo, nil
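Peer.Send and Peer.TrySend now take pre-encoded bytes rather than interface{} values, so callers marshal first (typically with the package amino codec, as the PEX reactor does further down) and hand the bytes to the peer. A minimal sketch of the narrowed interface with a throwaway implementation; printPeer and the channel byte are illustrative only:

package main

import "fmt"

// Peer mirrors the narrowed interface from the diff above.
type Peer interface {
	Send(chID byte, msgBytes []byte) bool
	TrySend(chID byte, msgBytes []byte) bool
}

// printPeer is a stand-in implementation used only for this sketch.
type printPeer struct{}

func (printPeer) Send(chID byte, msgBytes []byte) bool {
	fmt.Printf("send    ch=%X bytes=%X\n", chID, msgBytes)
	return true
}

func (printPeer) TrySend(chID byte, msgBytes []byte) bool {
	fmt.Printf("trySend ch=%X bytes=%X\n", chID, msgBytes)
	return true
}

func main() {
	var p Peer = printPeer{}
	// The caller encodes first (e.g. cdc.MustMarshalBinary(msg)) and sends bytes.
	p.Send(0x00, []byte("already-encoded message"))
}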

View File

@ -13,11 +13,11 @@ import (
// Returns an empty kvstore peer // Returns an empty kvstore peer
func randPeer() *peer { func randPeer() *peer {
pubKey := crypto.GenPrivKeyEd25519().Wrap().PubKey() nodeKey := NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
return &peer{ return &peer{
nodeInfo: NodeInfo{ nodeInfo: NodeInfo{
ID: nodeKey.ID(),
ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256), ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
PubKey: pubKey,
}, },
} }
} }

View File

@ -20,7 +20,7 @@ func TestPeerBasic(t *testing.T) {
assert, require := assert.New(t), require.New(t) assert, require := assert.New(t), require.New(t)
// simulate remote peer // simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: DefaultPeerConfig()} rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start() rp.Start()
defer rp.Stop() defer rp.Stop()
@ -47,7 +47,7 @@ func TestPeerWithoutAuthEnc(t *testing.T) {
config.AuthEnc = false config.AuthEnc = false
// simulate remote peer // simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: config} rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
rp.Start() rp.Start()
defer rp.Stop() defer rp.Stop()
@ -68,7 +68,7 @@ func TestPeerSend(t *testing.T) {
config.AuthEnc = false config.AuthEnc = false
// simulate remote peer // simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: config} rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
rp.Start() rp.Start()
defer rp.Stop() defer rp.Stop()
@ -81,7 +81,7 @@ func TestPeerSend(t *testing.T) {
defer p.Stop() defer p.Stop()
assert.True(p.CanSend(testCh)) assert.True(p.CanSend(testCh))
assert.True(p.Send(testCh, "Asylum")) assert.True(p.Send(testCh, []byte("Asylum")))
} }
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) { func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) {
@ -89,13 +89,13 @@ func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig)
{ID: testCh, Priority: 1}, {ID: testCh, Priority: 1},
} }
reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
pk := crypto.GenPrivKeyEd25519().Wrap() pk := crypto.GenPrivKeyEd25519()
pc, err := newOutboundPeerConn(addr, config, false, pk) pc, err := newOutboundPeerConn(addr, config, false, pk)
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodeInfo, err := pc.HandshakeTimeout(NodeInfo{ nodeInfo, err := pc.HandshakeTimeout(NodeInfo{
PubKey: pk.PubKey(), ID: addr.ID,
Moniker: "host_peer", Moniker: "host_peer",
Network: "testing", Network: "testing",
Version: "123.123.123", Version: "123.123.123",
@ -152,7 +152,7 @@ func (p *remotePeer) accept(l net.Listener) {
golog.Fatalf("Failed to create a peer: %+v", err) golog.Fatalf("Failed to create a peer: %+v", err)
} }
_, err = pc.HandshakeTimeout(NodeInfo{ _, err = pc.HandshakeTimeout(NodeInfo{
PubKey: p.PrivKey.PubKey(), ID: p.Addr().ID,
Moniker: "remote_peer", Moniker: "remote_peer",
Network: "testing", Network: "testing",
Version: "123.123.123", Version: "123.123.123",

View File

@ -9,7 +9,6 @@ import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"math" "math"
"math/rand"
"net" "net"
"sync" "sync"
"time" "time"
@ -82,7 +81,7 @@ type addrBook struct {
// accessed concurrently // accessed concurrently
mtx sync.Mutex mtx sync.Mutex
rand *rand.Rand rand *cmn.Rand
ourAddrs map[string]struct{} ourAddrs map[string]struct{}
addrLookup map[p2p.ID]*knownAddress // new & old addrLookup map[p2p.ID]*knownAddress // new & old
bucketsOld []map[string]*knownAddress bucketsOld []map[string]*knownAddress
@ -97,7 +96,7 @@ type addrBook struct {
// Use Start to begin processing asynchronous address updates. // Use Start to begin processing asynchronous address updates.
func NewAddrBook(filePath string, routabilityStrict bool) *addrBook { func NewAddrBook(filePath string, routabilityStrict bool) *addrBook {
am := &addrBook{ am := &addrBook{
rand: rand.New(rand.NewSource(time.Now().UnixNano())), // TODO: seed from outside rand: cmn.NewRand(),
ourAddrs: make(map[string]struct{}), ourAddrs: make(map[string]struct{}),
addrLookup: make(map[p2p.ID]*knownAddress), addrLookup: make(map[p2p.ID]*knownAddress),
filePath: filePath, filePath: filePath,
@ -320,7 +319,7 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress {
// XXX: What's the point of this if we already loop randomly through addrLookup ? // XXX: What's the point of this if we already loop randomly through addrLookup ?
for i := 0; i < numAddresses; i++ { for i := 0; i < numAddresses; i++ {
// pick a number between current index and the end // pick a number between current index and the end
j := rand.Intn(len(allAddr)-i) + i j := cmn.RandIntn(len(allAddr)-i) + i
allAddr[i], allAddr[j] = allAddr[j], allAddr[i] allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
} }
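GetSelection above keeps its partial Fisher-Yates shuffle: only the first numAddresses slots need to be randomized, so the loop stops there. A short illustrative sketch of that selection step; it uses math/rand for brevity, whereas the real code now draws from tmlibs' cmn.RandIntn (seeded from OS entropy):

package main

import (
	"fmt"
	"math/rand"
)

// selectRandom shuffles only the prefix it is about to return, mirroring the
// loop in GetSelection.
func selectRandom(all []string, numAddresses int) []string {
	if numAddresses > len(all) {
		numAddresses = len(all)
	}
	for i := 0; i < numAddresses; i++ {
		// pick a number between the current index and the end
		j := rand.Intn(len(all)-i) + i
		all[i], all[j] = all[j], all[i]
	}
	return all[:numAddresses]
}

func main() {
	addrs := []string{"a", "b", "c", "d", "e"}
	fmt.Println(selectRandom(addrs, 3))
}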

View File

@ -1,16 +1,13 @@
package pex package pex
import ( import (
"bytes"
"fmt" "fmt"
"math/rand"
"reflect" "reflect"
"sort" "sort"
"sync" "sync"
"time" "time"
"github.com/pkg/errors" "github.com/tendermint/go-amino"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
@ -23,7 +20,7 @@ const (
// PexChannel is a channel for PEX messages // PexChannel is a channel for PEX messages
PexChannel = byte(0x00) PexChannel = byte(0x00)
maxPexMessageSize = 1048576 // 1MB maxMsgSize = 1048576 // 1MB
// ensure we have enough peers // ensure we have enough peers
defaultEnsurePeersPeriod = 30 * time.Second defaultEnsurePeersPeriod = 30 * time.Second
@ -181,7 +178,7 @@ func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
// Receive implements Reactor by handling incoming PEX messages. // Receive implements Reactor by handling incoming PEX messages.
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
r.Switch.StopPeerForError(src, err) r.Switch.StopPeerForError(src, err)
@ -250,7 +247,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) {
return return
} }
r.requestsSent.Set(id, struct{}{}) r.requestsSent.Set(id, struct{}{})
p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}}) p.Send(PexChannel, cdc.MustMarshalBinary(&pexRequestMessage{}))
} }
// ReceiveAddrs adds the given addrs to the addrbook if there's an open // ReceiveAddrs adds the given addrs to the addrbook if there's an open
@ -260,7 +257,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
id := string(src.ID()) id := string(src.ID())
if !r.requestsSent.Has(id) { if !r.requestsSent.Has(id) {
return errors.New("Received unsolicited pexAddrsMessage") return cmn.NewError("Received unsolicited pexAddrsMessage")
} }
r.requestsSent.Delete(id) r.requestsSent.Delete(id)
@ -279,7 +276,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
// SendAddrs sends addrs to the peer. // SendAddrs sends addrs to the peer.
func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: netAddrs}}) p.Send(PexChannel, cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: netAddrs}))
} }
// SetEnsurePeersPeriod sets period to ensure peers connected. // SetEnsurePeersPeriod sets period to ensure peers connected.
@ -290,7 +287,7 @@ func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
// Ensures that sufficient peers are connected. (continuous) // Ensures that sufficient peers are connected. (continuous)
func (r *PEXReactor) ensurePeersRoutine() { func (r *PEXReactor) ensurePeersRoutine() {
var ( var (
seed = rand.New(rand.NewSource(time.Now().UnixNano())) seed = cmn.NewRand()
jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
) )
@ -377,7 +374,7 @@ func (r *PEXReactor) ensurePeers() {
peers := r.Switch.Peers().List() peers := r.Switch.Peers().List()
peersCount := len(peers) peersCount := len(peers)
if peersCount > 0 { if peersCount > 0 {
peer := peers[rand.Int()%peersCount] // nolint: gas peer := peers[cmn.RandInt()%peersCount] // nolint: gas
r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
r.RequestAddrs(peer) r.RequestAddrs(peer)
} }
@ -406,7 +403,7 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) {
// exponential backoff if it's not our first attempt to dial given address // exponential backoff if it's not our first attempt to dial given address
if attempts > 0 { if attempts > 0 {
jitterSeconds := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns) jitterSeconds := time.Duration(cmn.RandFloat64() * float64(time.Second)) // 1s == (1e9 ns)
backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second) backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second)
sinceLastDialed := time.Since(lastDialed) sinceLastDialed := time.Since(lastDialed)
if sinceLastDialed < backoffDuration { if sinceLastDialed < backoffDuration {
@ -459,7 +456,7 @@ func (r *PEXReactor) dialSeeds() {
} }
seedAddrs, _ := p2p.NewNetAddressStrings(r.config.Seeds) seedAddrs, _ := p2p.NewNetAddressStrings(r.config.Seeds)
perm := rand.Perm(lSeeds) perm := cmn.RandPerm(lSeeds)
// perm := r.Switch.rng.Perm(lSeeds) // perm := r.Switch.rng.Perm(lSeeds)
for _, i := range perm { for _, i := range perm {
// dial a random seed // dial a random seed
@ -606,27 +603,23 @@ func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeRequest = byte(0x01)
msgTypeAddrs = byte(0x02)
)
// PexMessage is a primary type for PEX messages. Underneath, it could contain // PexMessage is a primary type for PEX messages. Underneath, it could contain
// either pexRequestMessage, or pexAddrsMessage messages. // either pexRequestMessage, or pexAddrsMessage messages.
type PexMessage interface{} type PexMessage interface{}
var _ = wire.RegisterInterface( func RegisterPexMessage(cdc *amino.Codec) {
struct{ PexMessage }{}, cdc.RegisterInterface((*PexMessage)(nil), nil)
wire.ConcreteType{&pexRequestMessage{}, msgTypeRequest}, cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil)
wire.ConcreteType{&pexAddrsMessage{}, msgTypeAddrs}, cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil)
) }
// DecodeMessage implements interface registered above. // DecodeMessage implements interface registered above.
func DecodeMessage(bz []byte) (msgType byte, msg PexMessage, err error) { func DecodeMessage(bz []byte) (msg PexMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := new(int) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ PexMessage }{}, r, maxPexMessageSize, n, &err).(struct{ PexMessage }).PexMessage }
err = cdc.UnmarshalBinary(bz, &msg)
return return
} }
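A rough sketch of the registration-plus-decode pattern the PEX reactor now uses, built only from the go-amino calls visible in this diff (RegisterInterface, RegisterConcrete, MustMarshalBinary, UnmarshalBinary). The Message, requestMessage, and addrsMessage names below are illustrative, not the real types:

package main

import (
	"fmt"

	"github.com/tendermint/go-amino"
)

// Message plays the role of PexMessage: an interface whose concrete types are
// registered with the package codec.
type Message interface{}

type requestMessage struct{}
type addrsMessage struct{ Addrs []string }

const maxMsgSize = 1048576 // 1MB, the same cap used by DecodeMessage above

var cdc = amino.NewCodec()

func init() {
	cdc.RegisterInterface((*Message)(nil), nil)
	cdc.RegisterConcrete(&requestMessage{}, "example/RequestMessage", nil)
	cdc.RegisterConcrete(&addrsMessage{}, "example/AddrsMessage", nil)
}

// decodeMessage follows the shape of DecodeMessage: reject oversized payloads
// first, then let amino dispatch on the registered concrete type.
func decodeMessage(bz []byte) (msg Message, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinary(bz, &msg)
	return
}

func main() {
	bz := cdc.MustMarshalBinary(&addrsMessage{Addrs: []string{"1.2.3.4:46656"}})
	msg, err := decodeMessage(bz)
	fmt.Printf("%#v %v\n", msg, err)
}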

View File

@ -12,7 +12,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto" crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cfg "github.com/tendermint/tendermint/config" cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/conn" "github.com/tendermint/tendermint/p2p/conn"
@ -124,12 +123,12 @@ func TestPEXReactorReceive(t *testing.T) {
size := book.Size() size := book.Size()
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}}) msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
r.Receive(PexChannel, peer, msg) r.Receive(PexChannel, peer, msg)
assert.Equal(t, size+1, book.Size()) assert.Equal(t, size+1, book.Size())
msg = wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}}) msg = cdc.MustMarshalBinary(&pexRequestMessage{})
r.Receive(PexChannel, peer, msg) r.Receive(PexChannel, peer, msg) // should not panic.
} }
func TestPEXReactorRequestMessageAbuse(t *testing.T) { func TestPEXReactorRequestMessageAbuse(t *testing.T) {
@ -144,7 +143,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) {
assert.True(t, sw.Peers().Has(peer.ID())) assert.True(t, sw.Peers().Has(peer.ID()))
id := string(peer.ID()) id := string(peer.ID())
msg := wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}}) msg := cdc.MustMarshalBinary(&pexRequestMessage{})
// first time creates the entry // first time creates the entry
r.Receive(PexChannel, peer, msg) r.Receive(PexChannel, peer, msg)
@ -181,7 +180,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
assert.True(t, sw.Peers().Has(peer.ID())) assert.True(t, sw.Peers().Has(peer.ID()))
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}}) msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
// receive some addrs. should clear the request // receive some addrs. should clear the request
r.Receive(PexChannel, peer, msg) r.Receive(PexChannel, peer, msg)
@ -257,7 +256,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
defer peer.Stop() defer peer.Stop()
// 3. check that the peer connects to seed immediately // 3. check that the peer connects to seed immediately
assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 1*time.Second, 1) assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1)
} }
func TestPEXReactorCrawlStatus(t *testing.T) { func TestPEXReactorCrawlStatus(t *testing.T) {
@ -290,7 +289,7 @@ func TestPEXReactorCrawlStatus(t *testing.T) {
func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
peer := p2p.CreateRandomPeer(false) peer := p2p.CreateRandomPeer(false)
pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID())}}) pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID)}})
defer teardownReactor(book) defer teardownReactor(book)
// we have to send a request to receive responses // we have to send a request to receive responses
@ -298,7 +297,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
size := book.Size() size := book.Size()
addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()}
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}}) msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs})
pexR.Receive(PexChannel, peer, msg) pexR.Receive(PexChannel, peer, msg)
assert.Equal(t, size, book.Size()) assert.Equal(t, size, book.Size())
@ -350,27 +349,27 @@ func newMockPeer() mockPeer {
_, netAddr := p2p.CreateRoutableAddr() _, netAddr := p2p.CreateRoutableAddr()
mp := mockPeer{ mp := mockPeer{
addr: netAddr, addr: netAddr,
pubKey: crypto.GenPrivKeyEd25519().Wrap().PubKey(), pubKey: crypto.GenPrivKeyEd25519().PubKey(),
} }
mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp) mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp)
mp.Start() mp.Start()
return mp return mp
} }
func (mp mockPeer) ID() p2p.ID { return p2p.PubKeyToID(mp.pubKey) } func (mp mockPeer) ID() p2p.ID { return mp.addr.ID }
func (mp mockPeer) IsOutbound() bool { return mp.outbound } func (mp mockPeer) IsOutbound() bool { return mp.outbound }
func (mp mockPeer) IsPersistent() bool { return mp.persistent } func (mp mockPeer) IsPersistent() bool { return mp.persistent }
func (mp mockPeer) NodeInfo() p2p.NodeInfo { func (mp mockPeer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{ return p2p.NodeInfo{
PubKey: mp.pubKey, ID: mp.addr.ID,
ListenAddr: mp.addr.DialString(), ListenAddr: mp.addr.DialString(),
} }
} }
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp mockPeer) Send(byte, interface{}) bool { return false } func (mp mockPeer) Send(byte, []byte) bool { return false }
func (mp mockPeer) TrySend(byte, interface{}) bool { return false } func (mp mockPeer) TrySend(byte, []byte) bool { return false }
func (mp mockPeer) Set(string, interface{}) {} func (mp mockPeer) Set(string, interface{}) {}
func (mp mockPeer) Get(string) interface{} { return nil } func (mp mockPeer) Get(string) interface{} { return nil }
func assertPeersWithTimeout( func assertPeersWithTimeout(
t *testing.T, t *testing.T,

11
p2p/pex/wire.go Normal file
View File

@ -0,0 +1,11 @@
package pex
import (
"github.com/tendermint/go-amino"
)
var cdc *amino.Codec = amino.NewCodec()
func init() {
RegisterPexMessage(cdc)
}

View File

@ -3,13 +3,10 @@ package p2p
import ( import (
"fmt" "fmt"
"math" "math"
"math/rand"
"net" "net"
"sync" "sync"
"time" "time"
"github.com/pkg/errors"
cfg "github.com/tendermint/tendermint/config" cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p/conn" "github.com/tendermint/tendermint/p2p/conn"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@ -69,7 +66,7 @@ type Switch struct {
filterConnByAddr func(net.Addr) error filterConnByAddr func(net.Addr) error
filterConnByID func(ID) error filterConnByID func(ID) error
rng *rand.Rand // seed for randomizing dial times and orders rng *cmn.Rand // seed for randomizing dial times and orders
} }
// NewSwitch creates a new Switch with the given config. // NewSwitch creates a new Switch with the given config.
@ -84,15 +81,14 @@ func NewSwitch(config *cfg.P2PConfig) *Switch {
dialing: cmn.NewCMap(), dialing: cmn.NewCMap(),
} }
// Ensure we have a completely non-deterministic PRNG. cmn.RandInt64() draws // Ensure we have a completely non-deterministic PRNG.
// from a seed that's initialized with OS entropy on process start. sw.rng = cmn.NewRand()
sw.rng = rand.New(rand.NewSource(cmn.RandInt64()))
// TODO: collapse the peerConfig into the config ? // TODO: collapse the peerConfig into the config ?
sw.peerConfig.MConfig.FlushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond sw.peerConfig.MConfig.FlushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond
sw.peerConfig.MConfig.SendRate = config.SendRate sw.peerConfig.MConfig.SendRate = config.SendRate
sw.peerConfig.MConfig.RecvRate = config.RecvRate sw.peerConfig.MConfig.RecvRate = config.RecvRate
sw.peerConfig.MConfig.MaxMsgPacketPayloadSize = config.MaxMsgPacketPayloadSize sw.peerConfig.MConfig.MaxPacketMsgPayloadSize = config.MaxPacketMsgPayloadSize
sw.peerConfig.AuthEnc = config.AuthEnc sw.peerConfig.AuthEnc = config.AuthEnc
sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw) sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
@ -178,7 +174,7 @@ func (sw *Switch) OnStart() error {
for _, reactor := range sw.reactors { for _, reactor := range sw.reactors {
err := reactor.Start() err := reactor.Start()
if err != nil { if err != nil {
return errors.Wrapf(err, "failed to start %v", reactor) return cmn.ErrorWrap(err, "failed to start %v", reactor)
} }
} }
// Start listeners // Start listeners
@ -213,18 +209,18 @@ func (sw *Switch) OnStop() {
// Broadcast runs a go routine for each attempted send, which will block trying // Broadcast runs a go routine for each attempted send, which will block trying
// to send for defaultSendTimeoutSeconds. Returns a channel which receives // to send for defaultSendTimeoutSeconds. Returns a channel which receives
// success values for each attempted send (false if times out). Channel will be // success values for each attempted send (false if times out). Channel will be
// closed once msg send to all peers. // closed once msg bytes are sent to all peers (or time out).
// //
// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool { func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
successChan := make(chan bool, len(sw.peers.List())) successChan := make(chan bool, len(sw.peers.List()))
sw.Logger.Debug("Broadcast", "channel", chID, "msg", msg) sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes))
var wg sync.WaitGroup var wg sync.WaitGroup
for _, peer := range sw.peers.List() { for _, peer := range sw.peers.List() {
wg.Add(1) wg.Add(1)
go func(peer Peer) { go func(peer Peer) {
defer wg.Done() defer wg.Done()
success := peer.Send(chID, msg) success := peer.Send(chID, msgBytes)
successChan <- success successChan <- success
}(peer) }(peer)
} }
@ -363,7 +359,9 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
for _, netAddr := range netAddrs { for _, netAddr := range netAddrs {
// do not add our address or ID // do not add our address or ID
if !netAddr.Same(ourAddr) { if !netAddr.Same(ourAddr) {
addrBook.AddAddress(netAddr, ourAddr) if err := addrBook.AddAddress(netAddr, ourAddr); err != nil {
sw.Logger.Error("Can't add peer's address to addrbook", "err", err)
}
} }
} }
// Persist some peers to disk right away. // Persist some peers to disk right away.
@ -517,7 +515,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
return err return err
} }
peerID := peerNodeInfo.ID() peerID := peerNodeInfo.ID
// ensure connection key matches self reported key // ensure connection key matches self reported key
if pc.config.AuthEnc { if pc.config.AuthEnc {
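Switch.Broadcast (changed earlier in this file to take msgBytes []byte) fans the same bytes out to every peer: one goroutine per peer, a buffered success channel, and the channel closed once every send has reported. A self-contained sketch of that fan-out shape; the peers slice, the send callback, and the closing goroutine are this sketch's own stand-ins:

package main

import (
	"fmt"
	"sync"
)

// broadcast mirrors the Broadcast pattern: each send runs in its own
// goroutine and reports into a buffered channel, which is closed when all
// sends have finished.
func broadcast(peers []string, msgBytes []byte, send func(peer string, msgBytes []byte) bool) chan bool {
	successChan := make(chan bool, len(peers))
	var wg sync.WaitGroup
	for _, peer := range peers {
		wg.Add(1)
		go func(peer string) {
			defer wg.Done()
			successChan <- send(peer, msgBytes)
		}(peer)
	}
	go func() {
		wg.Wait()
		close(successChan)
	}()
	return successChan
}

func main() {
	peers := []string{"a", "b", "c"}
	ch := broadcast(peers, []byte("test data"), func(p string, _ []byte) bool { return p != "b" })
	ok := 0
	for s := range ch {
		if s {
			ok++
		}
	}
	fmt.Printf("%d/%d sends succeeded\n", ok, len(peers))
}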

View File

@ -12,7 +12,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto" crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
cfg "github.com/tendermint/tendermint/config" cfg "github.com/tendermint/tendermint/config"
@ -120,9 +119,9 @@ func TestSwitches(t *testing.T) {
} }
// Lets send some messages // Lets send some messages
ch0Msg := "channel zero" ch0Msg := []byte("channel zero")
ch1Msg := "channel foo" ch1Msg := []byte("channel foo")
ch2Msg := "channel bar" ch2Msg := []byte("channel bar")
s1.Broadcast(byte(0x00), ch0Msg) s1.Broadcast(byte(0x00), ch0Msg)
s1.Broadcast(byte(0x01), ch1Msg) s1.Broadcast(byte(0x01), ch1Msg)
@ -133,15 +132,15 @@ func TestSwitches(t *testing.T) {
assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
} }
func assertMsgReceivedWithTimeout(t *testing.T, msg string, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) { func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
ticker := time.NewTicker(checkPeriod) ticker := time.NewTicker(checkPeriod)
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
msgs := reactor.getMsgs(channel) msgs := reactor.getMsgs(channel)
if len(msgs) > 0 { if len(msgs) > 0 {
if !bytes.Equal(msgs[0].Bytes, wire.BinaryBytes(msg)) { if !bytes.Equal(msgs[0].Bytes, msgBytes) {
t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(msg), msgs[0].Bytes) t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
} }
return return
} }
@ -222,14 +221,14 @@ func TestConnIDFilter(t *testing.T) {
c1, c2 := conn.NetPipe() c1, c2 := conn.NetPipe()
s1.SetIDFilter(func(id ID) error { s1.SetIDFilter(func(id ID) error {
if id == PubKeyToID(s2.nodeInfo.PubKey) { if id == s2.nodeInfo.ID {
return fmt.Errorf("Error: pipe is blacklisted") return fmt.Errorf("Error: pipe is blacklisted")
} }
return nil return nil
}) })
s2.SetIDFilter(func(id ID) error { s2.SetIDFilter(func(id ID) error {
if id == PubKeyToID(s1.nodeInfo.PubKey) { if id == s1.nodeInfo.ID {
return fmt.Errorf("Error: pipe is blacklisted") return fmt.Errorf("Error: pipe is blacklisted")
} }
return nil return nil
@ -259,7 +258,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
defer sw.Stop() defer sw.Stop()
// simulate remote peer // simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: DefaultPeerConfig()} rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start() rp.Start()
defer rp.Stop() defer rp.Stop()
@ -289,7 +288,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
defer sw.Stop() defer sw.Stop()
// simulate remote peer // simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519().Wrap(), Config: DefaultPeerConfig()} rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start() rp.Start()
defer rp.Stop() defer rp.Stop()
@ -359,7 +358,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
// Send random message from foo channel to another // Send random message from foo channel to another
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
chID := byte(i % 4) chID := byte(i % 4)
successChan := s1.Broadcast(chID, "test data") successChan := s1.Broadcast(chID, []byte("test data"))
for s := range successChan { for s := range successChan {
if s { if s {
numSuccess++ numSuccess++

View File

@ -1,7 +1,6 @@
package p2p package p2p
import ( import (
"math/rand"
"net" "net"
crypto "github.com/tendermint/go-crypto" crypto "github.com/tendermint/go-crypto"
@ -23,8 +22,8 @@ func CreateRandomPeer(outbound bool) *peer {
outbound: outbound, outbound: outbound,
}, },
nodeInfo: NodeInfo{ nodeInfo: NodeInfo{
ID: netAddr.ID,
ListenAddr: netAddr.DialString(), ListenAddr: netAddr.DialString(),
PubKey: crypto.GenPrivKeyEd25519().Wrap().PubKey(),
}, },
mconn: &conn.MConnection{}, mconn: &conn.MConnection{},
} }
@ -35,7 +34,7 @@ func CreateRandomPeer(outbound bool) *peer {
func CreateRoutableAddr() (addr string, netAddr *NetAddress) { func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
for { for {
var err error var err error
addr = cmn.Fmt("%X@%v.%v.%v.%v:46656", cmn.RandBytes(20), rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256) addr = cmn.Fmt("%X@%v.%v.%v.%v:46656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256)
netAddr, err = NewNetAddressString(addr) netAddr, err = NewNetAddressString(addr)
if err != nil { if err != nil {
panic(err) panic(err)
@ -131,17 +130,17 @@ func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
// new switch, add reactors // new switch, add reactors
// TODO: let the config be passed in? // TODO: let the config be passed in?
nodeKey := &NodeKey{ nodeKey := &NodeKey{
PrivKey: crypto.GenPrivKeyEd25519().Wrap(), PrivKey: crypto.GenPrivKeyEd25519(),
} }
sw := NewSwitch(cfg) sw := NewSwitch(cfg)
sw.SetLogger(log.TestingLogger()) sw.SetLogger(log.TestingLogger())
sw = initSwitch(i, sw) sw = initSwitch(i, sw)
ni := NodeInfo{ ni := NodeInfo{
PubKey: nodeKey.PubKey(), ID: nodeKey.ID(),
Moniker: cmn.Fmt("switch%d", i), Moniker: cmn.Fmt("switch%d", i),
Network: network, Network: network,
Version: version, Version: version,
ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023), ListenAddr: cmn.Fmt("%v:%v", network, cmn.RandIntn(64512)+1023),
} }
for ch := range sw.reactorsByCh { for ch := range sw.reactorsByCh {
ni.Channels = append(ni.Channels, ch) ni.Channels = append(ni.Channels, ch)

View File

@ -5,8 +5,6 @@ import (
"net" "net"
"time" "time"
"github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@ -19,26 +17,26 @@ type UPNPCapabilities struct {
func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) { func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) {
nat, err := Discover() nat, err := Discover()
if err != nil { if err != nil {
return nil, nil, nil, errors.Errorf("NAT upnp could not be discovered: %v", err) return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err)
} }
logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP)) logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP))
ext, err := nat.GetExternalAddress() ext, err := nat.GetExternalAddress()
if err != nil { if err != nil {
return nat, nil, nil, errors.Errorf("External address error: %v", err) return nat, nil, nil, fmt.Errorf("External address error: %v", err)
} }
logger.Info(cmn.Fmt("External address: %v", ext)) logger.Info(cmn.Fmt("External address: %v", ext))
port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
if err != nil { if err != nil {
return nat, nil, ext, errors.Errorf("Port mapping error: %v", err) return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err)
} }
logger.Info(cmn.Fmt("Port mapping mapped: %v", port)) logger.Info(cmn.Fmt("Port mapping mapped: %v", port))
// also run the listener, open for all remote addresses. // also run the listener, open for all remote addresses.
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
if err != nil { if err != nil {
return nat, nil, ext, errors.Errorf("Error establishing listener: %v", err) return nat, nil, ext, fmt.Errorf("Error establishing listener: %v", err)
} }
return nat, listener, ext, nil return nat, listener, ext, nil
} }

12
p2p/wire.go Normal file
View File

@ -0,0 +1,12 @@
package p2p
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}

View File

@ -1,16 +1,16 @@
package client_test package client_test
import ( import (
"reflect"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
) )
var waitForEventTimeout = 5 * time.Second var waitForEventTimeout = 5 * time.Second
@ -23,116 +23,127 @@ func MakeTxKV() ([]byte, []byte, []byte) {
} }
func TestHeaderEvents(t *testing.T) { func TestHeaderEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() { for i, c := range GetClients() {
// start for this test it if it wasn't already running i, c := i, c // capture params
if !c.IsRunning() { t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
// if so, then we start it, listen, and stop it. // start for this test it if it wasn't already running
err := c.Start() if !c.IsRunning() {
require.Nil(err, "%d: %+v", i, err) // if so, then we start it, listen, and stop it.
defer c.Stop() err := c.Start()
} require.Nil(t, err, "%d: %+v", i, err)
defer c.Stop()
}
evtTyp := types.EventNewBlockHeader evtTyp := types.EventNewBlockHeader
evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
require.Nil(err, "%d: %+v", i, err) require.Nil(t, err, "%d: %+v", i, err)
_, ok := evt.Unwrap().(types.EventDataNewBlockHeader) _, ok := evt.(types.EventDataNewBlockHeader)
require.True(ok, "%d: %#v", i, evt) require.True(t, ok, "%d: %#v", i, evt)
// TODO: more checks... // TODO: more checks...
})
} }
} }
func TestBlockEvents(t *testing.T) { func TestBlockEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() { for i, c := range GetClients() {
// start for this test it if it wasn't already running i, c := i, c // capture params
if !c.IsRunning() { t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
// if so, then we start it, listen, and stop it.
err := c.Start()
require.Nil(err, "%d: %+v", i, err)
defer c.Stop()
}
// listen for a new block; ensure height increases by 1 // start for this test it if it wasn't already running
var firstBlockHeight int64 if !c.IsRunning() {
for j := 0; j < 3; j++ { // if so, then we start it, listen, and stop it.
evtTyp := types.EventNewBlock err := c.Start()
evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) require.Nil(t, err, "%d: %+v", i, err)
require.Nil(err, "%d: %+v", j, err) defer c.Stop()
blockEvent, ok := evt.Unwrap().(types.EventDataNewBlock)
require.True(ok, "%d: %#v", j, evt)
block := blockEvent.Block
if j == 0 {
firstBlockHeight = block.Header.Height
continue
} }
require.Equal(block.Header.Height, firstBlockHeight+int64(j)) // listen for a new block; ensure height increases by 1
} var firstBlockHeight int64
for j := 0; j < 3; j++ {
evtTyp := types.EventNewBlock
evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
require.Nil(t, err, "%d: %+v", j, err)
blockEvent, ok := evt.(types.EventDataNewBlock)
require.True(t, ok, "%d: %#v", j, evt)
block := blockEvent.Block
if j == 0 {
firstBlockHeight = block.Header.Height
continue
}
require.Equal(t, block.Header.Height, firstBlockHeight+int64(j))
}
})
} }
} }
func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() { for i, c := range GetClients() {
// start for this test it if it wasn't already running i, c := i, c // capture params
if !c.IsRunning() { t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
// if so, then we start it, listen, and stop it.
err := c.Start()
require.Nil(err, "%d: %+v", i, err)
defer c.Stop()
}
// make the tx // start for this test it if it wasn't already running
_, _, tx := MakeTxKV() if !c.IsRunning() {
evtTyp := types.EventTx // if so, then we start it, listen, and stop it.
err := c.Start()
require.Nil(t, err, "%d: %+v", i, err)
defer c.Stop()
}
// send async // make the tx
txres, err := c.BroadcastTxAsync(tx) _, _, tx := MakeTxKV()
require.Nil(err, "%+v", err) evtTyp := types.EventTx
require.Equal(txres.Code, abci.CodeTypeOK) // FIXME
// and wait for confirmation // send async
evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) txres, err := c.BroadcastTxAsync(tx)
require.Nil(err, "%d: %+v", i, err) require.Nil(t, err, "%+v", err)
// and make sure it has the proper info require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
txe, ok := evt.Unwrap().(types.EventDataTx)
require.True(ok, "%d: %#v", i, evt) // and wait for confirmation
// make sure this is the proper tx evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
require.EqualValues(tx, txe.Tx) require.Nil(t, err, "%d: %+v", i, err)
require.True(txe.Result.IsOK()) // and make sure it has the proper info
txe, ok := evt.(types.EventDataTx)
require.True(t, ok, "%d: %#v", i, evt)
// make sure this is the proper tx
require.EqualValues(t, tx, txe.Tx)
require.True(t, txe.Result.IsOK())
})
} }
} }
func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() { for i, c := range GetClients() {
// start for this test it if it wasn't already running i, c := i, c // capture params
if !c.IsRunning() { t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
// if so, then we start it, listen, and stop it.
err := c.Start()
require.Nil(err, "%d: %+v", i, err)
defer c.Stop()
}
// make the tx // start for this test it if it wasn't already running
_, _, tx := MakeTxKV() if !c.IsRunning() {
evtTyp := types.EventTx // if so, then we start it, listen, and stop it.
err := c.Start()
require.Nil(t, err, "%d: %+v", i, err)
defer c.Stop()
}
// send sync // make the tx
txres, err := c.BroadcastTxSync(tx) _, _, tx := MakeTxKV()
require.Nil(err, "%+v", err) evtTyp := types.EventTx
require.Equal(txres.Code, abci.CodeTypeOK) // FIXME
// and wait for confirmation // send sync
evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) txres, err := c.BroadcastTxSync(tx)
require.Nil(err, "%d: %+v", i, err) require.Nil(t, err, "%+v", err)
// and make sure it has the proper info require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
txe, ok := evt.Unwrap().(types.EventDataTx)
require.True(ok, "%d: %#v", i, evt) // and wait for confirmation
// make sure this is the proper tx evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
require.EqualValues(tx, txe.Tx) require.Nil(t, err, "%d: %+v", i, err)
require.True(txe.Result.IsOK()) // and make sure it has the proper info
txe, ok := evt.(types.EventDataTx)
require.True(t, ok, "%d: %#v", i, evt)
// make sure this is the proper tx
require.EqualValues(t, tx, txe.Tx)
require.True(t, txe.Result.IsOK())
})
} }
} }
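The event tests above are rewrapped in t.Run subtests, and the `i, c := i, c // capture params` line re-binds the loop variables so each closure captures its own copy instead of the shared loop variable. A minimal _test.go sketch of just that pattern, with illustrative client names:

package example

import "testing"

// TestEach shows the capture idiom used above: without the re-binding, every
// subtest closure would observe the final values of i and c.
func TestEach(t *testing.T) {
	clients := []string{"*client.HTTP", "*client.Local"} // illustrative names
	for i, c := range clients {
		i, c := i, c // capture params
		t.Run(c, func(t *testing.T) {
			t.Logf("client %d: %s", i, c)
		})
	}
}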

View File

@ -41,7 +41,7 @@ func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
if err != nil { if err != nil {
return err return err
} }
delta = h - s.LatestBlockHeight delta = h - s.SyncInfo.LatestBlockHeight
// wait for the time, or abort early // wait for the time, or abort early
if err := waiter(delta); err != nil { if err := waiter(delta); err != nil {
return err return err
@ -65,7 +65,7 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
query := types.QueryForEvent(evtTyp) query := types.QueryForEvent(evtTyp)
err := c.Subscribe(ctx, subscriber, query, evts) err := c.Subscribe(ctx, subscriber, query, evts)
if err != nil { if err != nil {
return types.TMEventData{}, errors.Wrap(err, "failed to subscribe") return nil, errors.Wrap(err, "failed to subscribe")
} }
// make sure to unregister after the test is over // make sure to unregister after the test is over
@ -75,6 +75,6 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
case evt := <-evts: case evt := <-evts:
return evt.(types.TMEventData), nil return evt.(types.TMEventData), nil
case <-ctx.Done(): case <-ctx.Done():
return types.TMEventData{}, errors.New("timed out waiting for event") return nil, errors.New("timed out waiting for event")
} }
} }
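WaitForOneEvent now returns a nil TMEventData on failure, and its core is a subscription followed by a select between the first event and a context timeout. A small, self-contained sketch of that select shape; the plain-string event type and the channel-based subscription are stand-ins for the real client plumbing:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForOne returns the first event received or an error once the timeout
// context fires, mirroring the select in WaitForOneEvent.
func waitForOne(evts <-chan string, timeout time.Duration) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	select {
	case evt := <-evts:
		return evt, nil
	case <-ctx.Done():
		return "", errors.New("timed out waiting for event")
	}
}

func main() {
	evts := make(chan string, 1)
	go func() {
		time.Sleep(10 * time.Millisecond)
		evts <- "NewBlockHeader"
	}()
	fmt.Println(waitForOne(evts, time.Second))
}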

View File

@ -32,7 +32,7 @@ func TestWaitForHeight(t *testing.T) {
// now set current block height to 10 // now set current block height to 10
m.Call = mock.Call{ m.Call = mock.Call{
Response: &ctypes.ResultStatus{LatestBlockHeight: 10}, Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10} },
} }
// we will not wait for more than 10 blocks // we will not wait for more than 10 blocks
@ -52,7 +52,7 @@ func TestWaitForHeight(t *testing.T) {
// we use the callback to update the status height // we use the callback to update the status height
myWaiter := func(delta int64) error { myWaiter := func(delta int64) error {
// update the height for the next call // update the height for the next call
m.Call.Response = &ctypes.ResultStatus{LatestBlockHeight: 15} m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}}
return client.DefaultWaitStrategy(delta) return client.DefaultWaitStrategy(delta)
} }
@ -66,11 +66,11 @@ func TestWaitForHeight(t *testing.T) {
require.Nil(pre.Error) require.Nil(pre.Error)
prer, ok := pre.Response.(*ctypes.ResultStatus) prer, ok := pre.Response.(*ctypes.ResultStatus)
require.True(ok) require.True(ok)
assert.Equal(int64(10), prer.LatestBlockHeight) assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight)
post := r.Calls[4] post := r.Calls[4]
require.Nil(post.Error) require.Nil(post.Error)
postr, ok := post.Response.(*ctypes.ResultStatus) postr, ok := post.Response.(*ctypes.ResultStatus)
require.True(ok) require.True(ok)
assert.Equal(int64(15), postr.LatestBlockHeight) assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight)
} }

View File

@@ -2,11 +2,11 @@ package client

 import (
 	"context"
-	"encoding/json"
 	"sync"

 	"github.com/pkg/errors"

+	amino "github.com/tendermint/go-amino"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
 	"github.com/tendermint/tendermint/types"
@@ -32,10 +32,14 @@ type HTTP struct {

 // New takes a remote endpoint in the form tcp://<host>:<port>
 // and the websocket path (which always seems to be "/websocket")
 func NewHTTP(remote, wsEndpoint string) *HTTP {
+	rc := rpcclient.NewJSONRPCClient(remote)
+	cdc := rc.Codec()
+	ctypes.RegisterAmino(cdc)
+
 	return &HTTP{
-		rpc:      rpcclient.NewJSONRPCClient(remote),
+		rpc:      rc,
 		remote:   remote,
-		WSEvents: newWSEvents(remote, wsEndpoint),
+		WSEvents: newWSEvents(cdc, remote, wsEndpoint),
 	}
 }
@@ -217,6 +221,7 @@ func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) {

 type WSEvents struct {
 	cmn.BaseService
+	cdc      *amino.Codec
 	remote   string
 	endpoint string
 	ws       *rpcclient.WSClient
@@ -225,8 +230,9 @@ type WSEvents struct {
 	subscriptions map[string]chan<- interface{}
 }

-func newWSEvents(remote, endpoint string) *WSEvents {
+func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents {
 	wsEvents := &WSEvents{
+		cdc:           cdc,
 		endpoint:      endpoint,
 		remote:        remote,
 		subscriptions: make(map[string]chan<- interface{}),
@@ -240,6 +246,8 @@ func (w *WSEvents) OnStart() error {
 	w.ws = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() {
 		w.redoSubscriptions()
 	}))
+	w.ws.SetCodec(w.cdc)
+
 	err := w.ws.Start()
 	if err != nil {
 		return err
@@ -335,7 +343,7 @@ func (w *WSEvents) eventListener() {
 			continue
 		}
 		result := new(ctypes.ResultEvent)
-		err := json.Unmarshal(resp.Result, result)
+		err := w.cdc.UnmarshalJSON(resp.Result, result)
 		if err != nil {
 			w.Logger.Error("failed to unmarshal response", "err", err)
 			continue

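The HTTP client now wires a go-amino codec through both the JSON-RPC client and the websocket event listener, since responses are no longer decoded with encoding/json. For code built directly on the lower-level rpcclient package, the same wiring looks roughly like the sketch below; it assumes the variadic WSClient options can be omitted and is illustrative rather than copied from the repository:

package example

import (
	amino "github.com/tendermint/go-amino"

	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

// newWSClient mirrors what NewHTTP now does internally: register the core
// result types on an amino codec and hand that codec to the websocket client
// so incoming events can be decoded.
func newWSClient(remote, endpoint string) *rpcclient.WSClient {
	cdc := amino.NewCodec()
	ctypes.RegisterAmino(cdc)

	ws := rpcclient.NewWSClient(remote, endpoint)
	ws.SetCodec(cdc)
	return ws
}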

@@ -17,9 +17,11 @@ func TestStatus(t *testing.T) {
 	m := &mock.StatusMock{
 		Call: mock.Call{
 			Response: &ctypes.ResultStatus{
-				LatestBlockHash:   cmn.HexBytes("block"),
-				LatestAppHash:     cmn.HexBytes("app"),
-				LatestBlockHeight: 10,
+				SyncInfo: ctypes.SyncInfo{
+					LatestBlockHash:   cmn.HexBytes("block"),
+					LatestAppHash:     cmn.HexBytes("app"),
+					LatestBlockHeight: 10,
+				},
 			}},
 	}
@@ -29,8 +31,8 @@ func TestStatus(t *testing.T) {
 	// make sure response works proper
 	status, err := r.Status()
 	require.Nil(err, "%+v", err)
-	assert.EqualValues("block", status.LatestBlockHash)
-	assert.EqualValues(10, status.LatestBlockHeight)
+	assert.EqualValues("block", status.SyncInfo.LatestBlockHash)
+	assert.EqualValues(10, status.SyncInfo.LatestBlockHeight)

 	// make sure recorder works properly
 	require.Equal(1, len(r.Calls))
@@ -41,6 +43,6 @@ func TestStatus(t *testing.T) {
 	require.NotNil(rs.Response)
 	st, ok := rs.Response.(*ctypes.ResultStatus)
 	require.True(ok)
-	assert.EqualValues("block", st.LatestBlockHash)
-	assert.EqualValues(10, st.LatestBlockHeight)
+	assert.EqualValues("block", st.SyncInfo.LatestBlockHash)
+	assert.EqualValues(10, st.SyncInfo.LatestBlockHeight)
 }

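Fixtures and mocks need the same adjustment: status literals now populate the nested SyncInfo struct rather than setting the LatestBlock* fields at the top level. A standalone sketch (the cmn.HexBytes values mirror the test above and are purely illustrative):

package example

import (
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	cmn "github.com/tendermint/tmlibs/common"
)

// mockStatus builds a ResultStatus fixture under the new layout, with the
// latest-block fields nested inside SyncInfo.
func mockStatus() *ctypes.ResultStatus {
	return &ctypes.ResultStatus{
		SyncInfo: ctypes.SyncInfo{
			LatestBlockHash:   cmn.HexBytes("block"),
			LatestAppHash:     cmn.HexBytes("app"),
			LatestBlockHeight: 10,
		},
	}
}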

@@ -50,7 +50,7 @@ func TestInfo(t *testing.T) {
 		info, err := c.ABCIInfo()
 		require.Nil(t, err, "%d: %+v", i, err)
 		// TODO: this is not correct - fix merkleeyes!
-		// assert.EqualValues(t, status.LatestBlockHeight, info.Response.LastBlockHeight)
+		// assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight)
 		assert.True(t, strings.Contains(info.Response.Data, "size"))
 	}
 }
@@ -136,7 +136,7 @@ func TestAppCalls(t *testing.T) {
 	s, err := c.Status()
 	require.Nil(err, "%d: %+v", i, err)
 	// sh is start height or status height
-	sh := s.LatestBlockHeight
+	sh := s.SyncInfo.LatestBlockHeight

 	// look for the future
 	h := sh + 2

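Putting the pieces together against a running node: create the HTTP client, read the synced height from SyncInfo, and wait for the chain to advance. The node address and the use of WaitForHeight with a nil waiter (falling back to DefaultWaitStrategy) are assumptions for this sketch, not part of the diff:

package example

import (
	"github.com/tendermint/tendermint/rpc/client"
)

// waitTwoBlocks reads the current height from the new SyncInfo field and then
// blocks until the chain is two blocks further along.
func waitTwoBlocks() error {
	c := client.NewHTTP("tcp://localhost:46657", "/websocket") // address assumed
	s, err := c.Status()
	if err != nil {
		return err
	}
	h := s.SyncInfo.LatestBlockHeight + 2
	return client.WaitForHeight(c, h, nil) // nil waiter uses DefaultWaitStrategy
}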
Some files were not shown because too many files have changed in this diff.