Mirror of https://github.com/fluencelabs/tendermint (synced 2025-04-24 22:32:15 +00:00)

Commit 53b4d15409: Merge branch 'master' into ancaz/blockchain_reactor_reorg
@@ -14,13 +14,15 @@ program](https://hackerone.com/tendermint).
 - Apps

 - Go API
 - [libs] \#3811 Remove `db` from libs in favor of `https://github.com/tendermint/tm-cmn`

 ### FEATURES:

 ### IMPROVEMENTS:

 - [abci] \#3809 Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
+- [rpc] \#2252 Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence
 - [rpc] \#3818 Make `max_body_bytes` and `max_header_bytes` configurable
+- [p2p] \#3664 p2p/conn: reuse buffer when write/read from secret connection

 ### BUG FIXES:
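Note: the `/broadcast_evidence` entry is the headline change in this commit; the RPC handler, route, client methods, and tests for it are all added below. As orientation, a minimal client-side sketch, adapted from the doc comment introduced in rpc/core/evidence.go later in this diff (the helper name and the pre-built `ev` value are assumptions, not code from the commit):

package example

import (
    "fmt"

    "github.com/tendermint/tendermint/rpc/client"
    "github.com/tendermint/tendermint/types"
)

// broadcastDuplicateVote is a hypothetical helper showing how an application
// could submit duplicate-vote evidence through the new /broadcast_evidence endpoint.
func broadcastDuplicateVote(ev *types.DuplicateVoteEvidence) error {
    c := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
    if err := c.Start(); err != nil {
        return err
    }
    defer c.Stop()

    res, err := c.BroadcastEvidence(ev)
    if err != nil {
        return err
    }
    fmt.Printf("evidence accepted, hash %X\n", res.Hash)
    return nil
}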
@@ -115,6 +115,7 @@ func (app *KVStoreApplication) Commit() types.ResponseCommit {
     return types.ResponseCommit{Data: appHash}
 }

+// Returns an associated value or nil if missing.
 func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
     if reqQuery.Prove {
         value := app.state.db.Get(prefixKey(reqQuery.Data))
@@ -9,7 +9,9 @@ import (

     "github.com/tendermint/tendermint/abci/example/code"
     "github.com/tendermint/tendermint/abci/types"
+    "github.com/tendermint/tendermint/crypto/ed25519"
     "github.com/tendermint/tendermint/libs/log"
+    tmtypes "github.com/tendermint/tendermint/types"
     dbm "github.com/tendermint/tm-cmn/db"
 )

@@ -27,6 +29,8 @@ type PersistentKVStoreApplication struct {
     // validator set
     ValUpdates []types.ValidatorUpdate

+    valAddrToPubKeyMap map[string]types.PubKey
+
     logger log.Logger
 }

@@ -40,8 +44,9 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
     state := loadState(db)

     return &PersistentKVStoreApplication{
         app: &KVStoreApplication{state: state},
-        logger: log.NewNopLogger(),
+        valAddrToPubKeyMap: make(map[string]types.PubKey),
+        logger: log.NewNopLogger(),
     }
 }

@@ -83,8 +88,20 @@ func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
     return app.app.Commit()
 }

-func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
-    return app.app.Query(reqQuery)
+// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
+// For any other path, returns an associated value or nil if missing.
+func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+    switch reqQuery.Path {
+    case "/val":
+        key := []byte("val:" + string(reqQuery.Data))
+        value := app.app.state.db.Get(key)
+
+        resQuery.Key = reqQuery.Data
+        resQuery.Value = value
+        return
+    default:
+        return app.app.Query(reqQuery)
+    }
 }

 // Save the validators in the merkle tree
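Note: the new "/val" query path is what the RPC tests later in this diff use to verify that submitted evidence actually reduced a validator's power. A minimal read-side sketch under the same assumptions as those tests (the helper name is hypothetical; `c` is any rpc client and `rawpub` holds the validator's raw ed25519 public key bytes):

package example

import (
    "bytes"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/rpc/client"
)

// queryValidatorPower reads a validator back through the "/val" path and
// decodes the varint length-prefixed types.ValidatorUpdate it returns.
func queryValidatorPower(c client.Client, rawpub []byte) (int64, error) {
    res, err := c.ABCIQuery("/val", rawpub)
    if err != nil {
        return 0, err
    }
    var update abci.ValidatorUpdate
    if err := abci.ReadMessage(bytes.NewReader(res.Response.Value), &update); err != nil {
        return 0, err
    }
    return update.Power, nil
}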
@@ -102,6 +119,20 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
 func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
     // reset valset changes
     app.ValUpdates = make([]types.ValidatorUpdate, 0)

+    for _, ev := range req.ByzantineValidators {
+        switch ev.Type {
+        case tmtypes.ABCIEvidenceTypeDuplicateVote:
+            // decrease voting power by 1
+            if ev.TotalVotingPower == 0 {
+                continue
+            }
+            app.updateValidator(types.ValidatorUpdate{
+                PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)],
+                Power: ev.TotalVotingPower - 1,
+            })
+        }
+    }
     return types.ResponseBeginBlock{}
 }

@@ -174,6 +205,10 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
 // add, update, or remove a validator
 func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
     key := []byte("val:" + string(v.PubKey.Data))
+
+    pubkey := ed25519.PubKeyEd25519{}
+    copy(pubkey[:], v.PubKey.Data)
+
     if v.Power == 0 {
         // remove validator
         if !app.app.state.db.Has(key) {
@@ -183,6 +218,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
             Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
         }
         app.app.state.db.Delete(key)
+        delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
     } else {
         // add or update validator
         value := bytes.NewBuffer(make([]byte, 0))
@@ -192,6 +228,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
             Log: fmt.Sprintf("Error encoding validator: %v", err)}
         }
         app.app.state.db.Set(key, value.Bytes())
+        app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
     }

     // we only update the changes array if we successfully updated the tree
go.mod (1 change)
@@ -18,6 +18,7 @@ require (
     github.com/hashicorp/hcl v1.0.0 // indirect
     github.com/inconshreveable/mousetrap v1.0.0 // indirect
     github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect
+    github.com/libp2p/go-buffer-pool v0.0.1
     github.com/magiconair/properties v1.8.0
     github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
     github.com/mitchellh/mapstructure v1.1.2 // indirect
go.sum (2 changes)
@@ -63,6 +63,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/libp2p/go-buffer-pool v0.0.1 h1:9Rrn/H46cXjaA2HQ5Y8lyhOS1NhTkZ4yuEs2r3Eechg=
+github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
 github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
@@ -2,6 +2,7 @@ package conn

 import (
     "bytes"
+    "crypto/cipher"
     crand "crypto/rand"
     "crypto/sha256"
     "crypto/subtle"
@@ -17,6 +18,7 @@ import (
     "golang.org/x/crypto/curve25519"
     "golang.org/x/crypto/nacl/box"

+    pool "github.com/libp2p/go-buffer-pool"
     "github.com/tendermint/tendermint/crypto"
     cmn "github.com/tendermint/tendermint/libs/common"
     "golang.org/x/crypto/hkdf"
@@ -47,10 +49,11 @@ var (
 type SecretConnection struct {

     // immutable
-    recvSecret *[aeadKeySize]byte
-    sendSecret *[aeadKeySize]byte
-    remPubKey crypto.PubKey
-    conn io.ReadWriteCloser
+    recvAead cipher.AEAD
+    sendAead cipher.AEAD
+
+    remPubKey crypto.PubKey
+    conn io.ReadWriteCloser

     // net.Conn must be thread safe:
     // https://golang.org/pkg/net/#Conn.
@@ -102,14 +105,22 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*
     // generate the secret used for receiving, sending, challenge via hkdf-sha2 on dhSecret
     recvSecret, sendSecret, challenge := deriveSecretAndChallenge(dhSecret, locIsLeast)

+    sendAead, err := chacha20poly1305.New(sendSecret[:])
+    if err != nil {
+        return nil, errors.New("Invalid send SecretConnection Key")
+    }
+    recvAead, err := chacha20poly1305.New(recvSecret[:])
+    if err != nil {
+        return nil, errors.New("Invalid receive SecretConnection Key")
+    }
     // Construct SecretConnection.
     sc := &SecretConnection{
         conn: conn,
         recvBuffer: nil,
         recvNonce: new([aeadNonceSize]byte),
         sendNonce: new([aeadNonceSize]byte),
-        recvSecret: recvSecret,
-        sendSecret: sendSecret,
+        recvAead: recvAead,
+        sendAead: sendAead,
     }

     // Sign the challenge bytes for authentication.
@@ -143,35 +154,39 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
     defer sc.sendMtx.Unlock()

     for 0 < len(data) {
-        var frame = make([]byte, totalFrameSize)
-        var chunk []byte
-        if dataMaxSize < len(data) {
-            chunk = data[:dataMaxSize]
-            data = data[dataMaxSize:]
-        } else {
-            chunk = data
-            data = nil
-        }
-        chunkLength := len(chunk)
-        binary.LittleEndian.PutUint32(frame, uint32(chunkLength))
-        copy(frame[dataLenSize:], chunk)
-
-        aead, err := chacha20poly1305.New(sc.sendSecret[:])
-        if err != nil {
-            return n, errors.New("Invalid SecretConnection Key")
-        }
-
-        // encrypt the frame
-        var sealedFrame = make([]byte, aeadSizeOverhead+totalFrameSize)
-        aead.Seal(sealedFrame[:0], sc.sendNonce[:], frame, nil)
-        incrNonce(sc.sendNonce)
-        // end encryption
-
-        _, err = sc.conn.Write(sealedFrame)
-        if err != nil {
+        if err := func() error {
+            var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize)
+            var frame = pool.Get(totalFrameSize)
+            defer func() {
+                pool.Put(sealedFrame)
+                pool.Put(frame)
+            }()
+            var chunk []byte
+            if dataMaxSize < len(data) {
+                chunk = data[:dataMaxSize]
+                data = data[dataMaxSize:]
+            } else {
+                chunk = data
+                data = nil
+            }
+            chunkLength := len(chunk)
+            binary.LittleEndian.PutUint32(frame, uint32(chunkLength))
+            copy(frame[dataLenSize:], chunk)
+
+            // encrypt the frame
+            sc.sendAead.Seal(sealedFrame[:0], sc.sendNonce[:], frame, nil)
+            incrNonce(sc.sendNonce)
+            // end encryption
+
+            _, err = sc.conn.Write(sealedFrame)
+            if err != nil {
+                return err
+            }
+            n += len(chunk)
+            return nil
+        }(); err != nil {
             return n, err
         }
-        n += len(chunk)
     }
     return
 }
@@ -189,21 +204,18 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) {
     }

     // read off the conn
-    sealedFrame := make([]byte, totalFrameSize+aeadSizeOverhead)
+    var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize)
+    defer pool.Put(sealedFrame)
     _, err = io.ReadFull(sc.conn, sealedFrame)
     if err != nil {
         return
     }

-    aead, err := chacha20poly1305.New(sc.recvSecret[:])
-    if err != nil {
-        return n, errors.New("Invalid SecretConnection Key")
-    }
-
     // decrypt the frame.
     // reads and updates the sc.recvNonce
-    var frame = make([]byte, totalFrameSize)
-    _, err = aead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil)
+    var frame = pool.Get(totalFrameSize)
+    defer pool.Put(frame)
+    _, err = sc.recvAead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil)
     if err != nil {
         return n, errors.New("Failed to decrypt SecretConnection")
     }
@@ -218,7 +230,10 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) {
     }
     var chunk = frame[dataLenSize : dataLenSize+chunkLength]
     n = copy(data, chunk)
-    sc.recvBuffer = chunk[n:]
+    if n < len(chunk) {
+        sc.recvBuffer = make([]byte, len(chunk)-n)
+        copy(sc.recvBuffer, chunk[n:])
+    }
     return
 }

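Note: taken together, Write and Read now build the two chacha20poly1305 AEADs once in MakeSecretConnection and borrow frame buffers from github.com/libp2p/go-buffer-pool instead of allocating a fresh slice per frame. A minimal sketch of the borrow/return pattern (helper name and callback are illustrative, not code from this commit):

package example

import (
    pool "github.com/libp2p/go-buffer-pool"
)

// processFrame is an illustrative helper: it borrows a frame-sized buffer from
// the shared pool, returns it when done, and copies out anything that must
// outlive the call (mirroring how Read copies leftover bytes into sc.recvBuffer).
func processFrame(frameSize int, fill func(buf []byte)) []byte {
    buf := pool.Get(frameSize) // borrow a []byte of length frameSize
    defer pool.Put(buf)        // hand it back to the pool for reuse

    fill(buf)

    out := make([]byte, frameSize)
    copy(out, buf)
    return out
}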
@@ -383,10 +383,23 @@ func createGoldenTestVectors(t *testing.T) string {
     return data
 }

-func BenchmarkSecretConnection(b *testing.B) {
+func BenchmarkWriteSecretConnection(b *testing.B) {
     b.StopTimer()
+    b.ReportAllocs()
     fooSecConn, barSecConn := makeSecretConnPair(b)
-    fooWriteText := cmn.RandStr(dataMaxSize)
+    randomMsgSizes := []int{
+        dataMaxSize / 10,
+        dataMaxSize / 3,
+        dataMaxSize / 2,
+        dataMaxSize,
+        dataMaxSize * 3 / 2,
+        dataMaxSize * 2,
+        dataMaxSize * 7 / 2,
+    }
+    fooWriteBytes := make([][]byte, 0, len(randomMsgSizes))
+    for _, size := range randomMsgSizes {
+        fooWriteBytes = append(fooWriteBytes, cmn.RandBytes(size))
+    }
     // Consume reads from bar's reader
     go func() {
         readBuffer := make([]byte, dataMaxSize)
@@ -402,7 +415,8 @@ func BenchmarkSecretConnection(b *testing.B) {

     b.StartTimer()
     for i := 0; i < b.N; i++ {
-        _, err := fooSecConn.Write([]byte(fooWriteText))
+        idx := cmn.RandIntn(len(fooWriteBytes))
+        _, err := fooSecConn.Write(fooWriteBytes[idx])
         if err != nil {
             b.Fatalf("Failed to write to fooSecConn: %v", err)
         }
@@ -414,3 +428,44 @@ func BenchmarkSecretConnection(b *testing.B) {
     }
     //barSecConn.Close() race condition
 }
+
+func BenchmarkReadSecretConnection(b *testing.B) {
+    b.StopTimer()
+    b.ReportAllocs()
+    fooSecConn, barSecConn := makeSecretConnPair(b)
+    randomMsgSizes := []int{
+        dataMaxSize / 10,
+        dataMaxSize / 3,
+        dataMaxSize / 2,
+        dataMaxSize,
+        dataMaxSize * 3 / 2,
+        dataMaxSize * 2,
+        dataMaxSize * 7 / 2,
+    }
+    fooWriteBytes := make([][]byte, 0, len(randomMsgSizes))
+    for _, size := range randomMsgSizes {
+        fooWriteBytes = append(fooWriteBytes, cmn.RandBytes(size))
+    }
+    go func() {
+        for i := 0; i < b.N; i++ {
+            idx := cmn.RandIntn(len(fooWriteBytes))
+            _, err := fooSecConn.Write(fooWriteBytes[idx])
+            if err != nil {
+                b.Fatalf("Failed to write to fooSecConn: %v, %v,%v", err, i, b.N)
+            }
+        }
+    }()
+
+    b.StartTimer()
+    for i := 0; i < b.N; i++ {
+        readBuffer := make([]byte, dataMaxSize)
+        _, err := barSecConn.Read(readBuffer)
+
+        if err == io.EOF {
+            return
+        } else if err != nil {
+            b.Fatalf("Failed to read from barSecConn: %v", err)
+        }
+    }
+    b.StopTimer()
+}
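Note: the old BenchmarkSecretConnection is split into write and read variants with allocation reporting and a mix of message sizes; both should be runnable with the standard Go tooling, e.g. `go test -run '^$' -bench SecretConnection -benchmem` inside the p2p/conn package.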
rpc/client/amino.go (new file, 12 lines)
@@ -0,0 +1,12 @@
+package client
+
+import (
+    amino "github.com/tendermint/go-amino"
+    "github.com/tendermint/tendermint/types"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+    types.RegisterEvidences(cdc)
+}
@@ -333,6 +333,15 @@ func (c *baseRPCClient) Validators(height *int64) (*ctypes.ResultValidators, err
     return result, nil
 }

+func (c *baseRPCClient) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+    result := new(ctypes.ResultBroadcastEvidence)
+    _, err := c.caller.Call("broadcast_evidence", map[string]interface{}{"evidence": ev}, result)
+    if err != nil {
+        return nil, errors.Wrap(err, "BroadcastEvidence")
+    }
+    return result, nil
+}
+
 //-----------------------------------------------------------------------------
 // WSEvents

@@ -28,9 +28,24 @@ import (
     "github.com/tendermint/tendermint/types"
 )

-// ABCIClient groups together the functionality that principally
-// affects the ABCI app. In many cases this will be all we want,
-// so we can accept an interface which is easier to mock
+// Client wraps most important rpc calls a client would make if you want to
+// listen for events, test if it also implements events.EventSwitch.
+type Client interface {
+    cmn.Service
+    ABCIClient
+    EventsClient
+    HistoryClient
+    NetworkClient
+    SignClient
+    StatusClient
+    EvidenceClient
+}
+
+// ABCIClient groups together the functionality that principally affects the
+// ABCI app.
+//
+// In many cases this will be all we want, so we can accept an interface which
+// is easier to mock.
 type ABCIClient interface {
     // Reading from abci app
     ABCIInfo() (*ctypes.ResultABCIInfo, error)
@@ -44,8 +59,8 @@ type ABCIClient interface {
     BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
 }

-// SignClient groups together the interfaces need to get valid
-// signatures and prove anything about the chain
+// SignClient groups together the functionality needed to get valid signatures
+// and prove anything about the chain.
 type SignClient interface {
     Block(height *int64) (*ctypes.ResultBlock, error)
     BlockResults(height *int64) (*ctypes.ResultBlockResults, error)
@@ -55,32 +70,19 @@ type SignClient interface {
     TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error)
 }

-// HistoryClient shows us data from genesis to now in large chunks.
+// HistoryClient provides access to data from genesis to now in large chunks.
 type HistoryClient interface {
     Genesis() (*ctypes.ResultGenesis, error)
     BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
 }

+// StatusClient provides access to general chain info.
 type StatusClient interface {
-    // General chain info
     Status() (*ctypes.ResultStatus, error)
 }

-// Client wraps most important rpc calls a client would make
-// if you want to listen for events, test if it also
-// implements events.EventSwitch
-type Client interface {
-    cmn.Service
-    ABCIClient
-    EventsClient
-    HistoryClient
-    NetworkClient
-    SignClient
-    StatusClient
-}
-
-// NetworkClient is general info about the network state. May not
-// be needed usually.
+// NetworkClient is general info about the network state. May not be needed
+// usually.
 type NetworkClient interface {
     NetInfo() (*ctypes.ResultNetInfo, error)
     DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
@@ -110,3 +112,9 @@ type MempoolClient interface {
     UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error)
     NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error)
 }
+
+// EvidenceClient is used for submitting an evidence of the malicious
+// behaviour.
+type EvidenceClient interface {
+    BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+}
|
|||||||
return core.TxSearch(c.ctx, query, prove, page, perPage)
|
return core.TxSearch(c.ctx, query, prove, page, perPage)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Local) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
|
||||||
|
return core.BroadcastEvidence(c.ctx, ev)
|
||||||
|
}
|
||||||
|
|
||||||
func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
|
func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
|
||||||
q, err := tmquery.New(query)
|
q, err := tmquery.New(query)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -1,6 +1,7 @@
 package client_test

 import (
+    "io/ioutil"
     "os"
     "testing"

@@ -13,7 +14,11 @@ var node *nm.Node

 func TestMain(m *testing.M) {
     // start a tendermint node (and kvstore) in the background to test against
-    app := kvstore.NewKVStoreApplication()
+    dir, err := ioutil.TempDir("/tmp", "rpc-client-test")
+    if err != nil {
+        panic(err)
+    }
+    app := kvstore.NewPersistentKVStoreApplication(dir)
     node = rpctest.StartTendermint(app)

     code := m.Run()
@@ -36,6 +36,7 @@ type Client struct {
     client.HistoryClient
     client.StatusClient
     client.EventsClient
+    client.EvidenceClient
     cmn.Service
 }

@@ -147,3 +148,7 @@ func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) {
 func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) {
     return core.Validators(&rpctypes.Context{}, height)
 }
+
+func (c Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+    return core.BroadcastEvidence(&rpctypes.Context{}, ev)
+}
@@ -1,7 +1,9 @@
 package client_test

 import (
+    "bytes"
     "fmt"
+    "math/rand"
     "net/http"
     "strings"
     "sync"
@@ -12,7 +14,10 @@ import (

     abci "github.com/tendermint/tendermint/abci/types"

+    "github.com/tendermint/tendermint/crypto/ed25519"
+    "github.com/tendermint/tendermint/crypto/tmhash"
     cmn "github.com/tendermint/tendermint/libs/common"
+    "github.com/tendermint/tendermint/privval"
     "github.com/tendermint/tendermint/rpc/client"
     ctypes "github.com/tendermint/tendermint/rpc/core/types"
     rpctest "github.com/tendermint/tendermint/rpc/test"
@@ -446,6 +451,145 @@ func TestTxSearch(t *testing.T) {
     }
 }

+func deepcpVote(vote *types.Vote) (res *types.Vote) {
+    res = &types.Vote{
+        ValidatorAddress: make([]byte, len(vote.ValidatorAddress)),
+        ValidatorIndex: vote.ValidatorIndex,
+        Height: vote.Height,
+        Round: vote.Round,
+        Type: vote.Type,
+        BlockID: types.BlockID{
+            Hash: make([]byte, len(vote.BlockID.Hash)),
+            PartsHeader: vote.BlockID.PartsHeader,
+        },
+        Signature: make([]byte, len(vote.Signature)),
+    }
+    copy(res.ValidatorAddress, vote.ValidatorAddress)
+    copy(res.BlockID.Hash, vote.BlockID.Hash)
+    copy(res.Signature, vote.Signature)
+    return
+}
+
+func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, chainID string) types.DuplicateVoteEvidence {
+    var err error
+    vote2_ := deepcpVote(vote2)
+    vote2_.Signature, err = val.Key.PrivKey.Sign(vote2_.SignBytes(chainID))
+    require.NoError(t, err)
+
+    return types.DuplicateVoteEvidence{
+        PubKey: val.Key.PubKey,
+        VoteA: vote,
+        VoteB: vote2_,
+    }
+}
+
+func makeEvidences(t *testing.T, val *privval.FilePV, chainID string) (ev types.DuplicateVoteEvidence, fakes []types.DuplicateVoteEvidence) {
+    vote := &types.Vote{
+        ValidatorAddress: val.Key.Address,
+        ValidatorIndex: 0,
+        Height: 1,
+        Round: 0,
+        Type: types.PrevoteType,
+        BlockID: types.BlockID{
+            Hash: tmhash.Sum([]byte("blockhash")),
+            PartsHeader: types.PartSetHeader{
+                Total: 1000,
+                Hash: tmhash.Sum([]byte("partset")),
+            },
+        },
+    }
+
+    var err error
+    vote.Signature, err = val.Key.PrivKey.Sign(vote.SignBytes(chainID))
+    require.NoError(t, err)
+
+    vote2 := deepcpVote(vote)
+    vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2"))
+
+    ev = newEvidence(t, val, vote, vote2, chainID)
+
+    fakes = make([]types.DuplicateVoteEvidence, 42)
+
+    // different address
+    vote2 = deepcpVote(vote)
+    for i := 0; i < 10; i++ {
+        rand.Read(vote2.ValidatorAddress) // nolint: gosec
+        fakes[i] = newEvidence(t, val, vote, vote2, chainID)
+    }
+    // different index
+    vote2 = deepcpVote(vote)
+    for i := 10; i < 20; i++ {
+        vote2.ValidatorIndex = rand.Int()%100 + 1 // nolint: gosec
+        fakes[i] = newEvidence(t, val, vote, vote2, chainID)
+    }
+    // different height
+    vote2 = deepcpVote(vote)
+    for i := 20; i < 30; i++ {
+        vote2.Height = rand.Int63()%1000 + 100 // nolint: gosec
+        fakes[i] = newEvidence(t, val, vote, vote2, chainID)
+    }
+    // different round
+    vote2 = deepcpVote(vote)
+    for i := 30; i < 40; i++ {
+        vote2.Round = rand.Int()%10 + 1 // nolint: gosec
+        fakes[i] = newEvidence(t, val, vote, vote2, chainID)
+    }
+    // different type
+    vote2 = deepcpVote(vote)
+    vote2.Type = types.PrecommitType
+    fakes[40] = newEvidence(t, val, vote, vote2, chainID)
+    // exactly same vote
+    vote2 = deepcpVote(vote)
+    fakes[41] = newEvidence(t, val, vote, vote2, chainID)
+    return
+}
+
+func TestBroadcastEvidenceDuplicateVote(t *testing.T) {
+    config := rpctest.GetConfig()
+    chainID := config.ChainID()
+    pvKeyFile := config.PrivValidatorKeyFile()
+    pvKeyStateFile := config.PrivValidatorStateFile()
+    pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile)
+
+    ev, fakes := makeEvidences(t, pv, chainID)
+
+    t.Logf("evidence %v", ev)
+
+    for i, c := range GetClients() {
+        t.Logf("client %d", i)
+
+        result, err := c.BroadcastEvidence(&types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB})
+        require.Nil(t, err)
+        require.Equal(t, ev.Hash(), result.Hash, "Invalid response, result %+v", result)
+
+        status, err := c.Status()
+        require.NoError(t, err)
+        client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil)
+
+        ed25519pub := ev.PubKey.(ed25519.PubKeyEd25519)
+        rawpub := ed25519pub[:]
+        result2, err := c.ABCIQuery("/val", rawpub)
+        require.Nil(t, err, "Error querying evidence, err %v", err)
+        qres := result2.Response
+        require.True(t, qres.IsOK(), "Response not OK")
+
+        var v abci.ValidatorUpdate
+        err = abci.ReadMessage(bytes.NewReader(qres.Value), &v)
+        require.NoError(t, err, "Error reading query result, value %v", qres.Value)
+
+        require.EqualValues(t, rawpub, v.PubKey.Data, "Stored PubKey not equal with expected, value %v", string(qres.Value))
+        require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value))
+
+        for _, fake := range fakes {
+            _, err := c.BroadcastEvidence(&types.DuplicateVoteEvidence{
+                PubKey: fake.PubKey,
+                VoteA: fake.VoteA,
+                VoteB: fake.VoteB})
+            require.Error(t, err, "Broadcasting fake evidence succeed: %s", fake.String())
+        }
+    }
+}
+
 func TestBatchedJSONRPCCalls(t *testing.T) {
     c := getHTTPClient()
     testBatchedJSONRPCCalls(t, c)
rpc/core/evidence.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package core
+
+import (
+    ctypes "github.com/tendermint/tendermint/rpc/core/types"
+    rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
+    "github.com/tendermint/tendermint/types"
+)
+
+// Broadcast evidence of the misbehavior.
+//
+// ```shell
+// curl 'localhost:26657/broadcast_evidence?evidence={amino-encoded DuplicateVoteEvidence}'
+// ```
+//
+// ```go
+// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
+// err := client.Start()
+// if err != nil {
+//   // handle error
+// }
+// defer client.Stop()
+// res, err := client.BroadcastEvidence(&types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB})
+// ```
+//
+// > The above command returns JSON structured like this:
+//
+// ```json
+// ```
+//
+// | Parameter | Type | Default | Required | Description |
+// |-----------+----------------+---------+----------+-----------------------------|
+// | evidence | types.Evidence | nil | true | Amino-encoded JSON evidence |
+func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) {
+    err := evidencePool.AddEvidence(ev)
+    if err != nil {
+        return nil, err
+    }
+    return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil
+}
@@ -30,7 +30,7 @@ var Routes = map[string]*rpc.RPCFunc{
     "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"),
     "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""),

-    // broadcast API
+    // tx broadcast API
     "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"),
     "broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"),
     "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"),
@@ -38,6 +38,9 @@ var Routes = map[string]*rpc.RPCFunc{
     // abci API
     "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"),
     "abci_info": rpc.NewRPCFunc(ABCIInfo, ""),
+
+    // evidence API
+    "broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"),
 }

 func AddUnsafeRoutes() {
@@ -194,6 +194,11 @@ type ResultABCIQuery struct {
     Response abci.ResponseQuery `json:"response"`
 }

+// Result of broadcasting evidence
+type ResultBroadcastEvidence struct {
+    Hash []byte `json:"hash"`
+}
+
 // empty results
 type (
     ResultUnsafeFlushMempool struct{}