Mirror of https://github.com/fluencelabs/tendermint
Synced 2025-07-23 08:11:57 +00:00
Compare commits: v0.31.0-de ... release/v0

9 Commits (SHA1):

4ae9b633ed
1fb900f9f5
eb7af9a765
5a7a97541b
df2f25fe82
f0c2c9ba5a
976819537d
f996b10f47
36d7180ca2
.circleci/config.yml

@@ -3,17 +3,20 @@ version: 2
 defaults: &defaults
   working_directory: /go/src/github.com/tendermint/tendermint
   docker:
-    - image: circleci/golang:1.11.4
+    - image: circleci/golang
   environment:
     GOBIN: /tmp/workspace/bin

 docs_update_config: &docs_update_config
   working_directory: ~/repo
   docker:
-    - image: tendermint/docs_deployment
+    - image: tendermintdev/jq_curl
   environment:
     AWS_REGION: us-east-1

+release_management_docker: &release_management_docker
+  machine: true
+
 jobs:
   setup_dependencies:
     <<: *defaults
@@ -154,7 +157,7 @@ jobs:
             for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
               id=$(basename "$pkg")

-              GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
+              go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
             done
       - persist_to_workspace:
           root: /tmp/workspace
@@ -192,7 +195,7 @@ jobs:
           name: run localnet and exit on failure
           command: |
             set -x
-            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux
+            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
             make localnet-start &
             ./scripts/localnet-blocks-test.sh 40 5 10 localhost

@@ -239,7 +242,121 @@ jobs:
       - run:
           name: Trigger website build
           command: |
-            chamber exec tendermint -- start_website_build
+            curl --silent \
+                 --show-error \
+                 -X POST \
+                 --header "Content-Type: application/json" \
+                 -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \
+                 "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json
+
+            RESULT=`jq -r '.status' response.json`
+            MESSAGE=`jq -r '.message' response.json`
+
+            if [[ ${RESULT} == "null" ]] || [[ ${RESULT} -ne "200" ]]; then
+              echo "CircleCI API call failed: $MESSAGE"
+              exit 1
+            else
+              echo "Website build started"
+            fi
+
+  prepare_build:
+    <<: *defaults
+    steps:
+      - checkout
+      - run:
+          name: Get next release number
+          command: |
+            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
+            echo "Last tag: ${LAST_TAG}"
+            if [ -z "${LAST_TAG}" ]; then
+              export LAST_TAG="${CIRCLE_BRANCH}"
+              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
+            fi
+            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
+            echo "Next tag: ${NEXT_TAG}"
+            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
+      - run:
+          name: Build dependencies
+          command: |
+            make get_tools get_vendor_deps
+      - persist_to_workspace:
+          root: .
+          paths:
+            - "release-version.source"
+      - save_cache:
+          key: v1-release-deps-{{ .Branch }}-{{ .Revision }}
+          paths:
+            - "vendor"
+
+  build_artifacts:
+    <<: *defaults
+    parallelism: 4
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - v1-release-deps-{{ .Branch }}-{{ .Revision }}
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Build artifact
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+      - persist_to_workspace:
+          root: build
+          paths:
+            - "*.zip"
+            - "tendermint_linux_amd64"
+
+  release_artifacts:
+    <<: *defaults
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Deploy to GitHub
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            echo "---"
+            ls -la /tmp/workspace/*.zip
+            echo "---"
+            python -u scripts/release_management/sha-files.py
+            echo "---"
+            cat /tmp/workspace/SHA256SUMS
+            echo "---"
+            export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
+            echo "Release ID: ${RELEASE_ID}"
+            #Todo: Parallelize uploads
+            export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
+            python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"
+
+  release_docker:
+    <<: *release_management_docker
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Deploy to Docker Hub
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
+            docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
+            docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}"
+            docker push "tendermint/tendermint"
+            docker logout

 workflows:
   version: 2
@@ -277,3 +394,25 @@ workflows:
       - upload_coverage:
           requires:
            - test_cover
+  release:
+    jobs:
+      - prepare_build
+      - build_artifacts:
+          requires:
+            - prepare_build
+      - release_artifacts:
+          requires:
+            - prepare_build
+            - build_artifacts
+          filters:
+            branches:
+              only:
+                - /v[0-9]+\.[0-9]+/
+      - release_docker:
+          requires:
+            - prepare_build
+            - build_artifacts
+          filters:
+            branches:
+              only:
+                - /v[0-9]+\.[0-9]+/
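The prepare_build job above computes NEXT_TAG by bumping the last tag that matches the release branch; the bump itself lives in scripts/release_management/bump-semver.py, which is not shown here. A minimal Go sketch of the presumed patch-bump logic (the function name and exact rules are illustrative assumptions, not the script's code):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bumpPatch takes a tag like "v0.31.0" (or a bare branch name like "v0.31",
// the "fresh branch" case handled in the job above) and returns the next
// patch release. Purely illustrative.
func bumpPatch(tag string) (string, error) {
	parts := strings.Split(strings.TrimPrefix(tag, "v"), ".")
	if len(parts) == 2 {
		// Fresh release branch such as "v0.31": start the patch series at .0.
		return fmt.Sprintf("v%s.%s.0", parts[0], parts[1]), nil
	}
	if len(parts) != 3 {
		return "", fmt.Errorf("unexpected tag format: %q", tag)
	}
	patch, err := strconv.Atoi(parts[2])
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("v%s.%s.%d", parts[0], parts[1], patch+1), nil
}

func main() {
	next, err := bumpPatch("v0.31.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(next) // v0.31.1
}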
CHANGELOG.md (41 changed lines)
@@ -1,5 +1,46 @@
 # Changelog

+## v0.30.3
+
+*April 1st, 2019*
+
+This release includes two security sensitive fixes: it ensures generated private
+keys are valid, and it prevents certain DNS lookups that would cause the node to
+panic if the lookup failed.
+
+### BUG FIXES:
+
+- [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439)
+  Ensure generated private keys are valid by randomly sampling until a valid key is found.
+  Previously, it was possible (though rare!) to generate keys that exceeded the curve order.
+  Such keys would lead to invalid signatures.
+- [p2p] [\#3522](https://github.com/tendermint/tendermint/issues/3522) Memoize
+  socket address in peer connections to avoid DNS lookups. Previously, failed
+  DNS lookups could cause the node to panic.
+
+### IMPROVEMENTS:
+
+- [circle] [\#3497](https://github.com/tendermint/tendermint/issues/3497) Move release management to CircleCI
+
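The secp256k1 fix above is plain rejection sampling: draw random candidates until one is a valid scalar in [1, N-1], where N is the curve order. A self-contained sketch of the idea — not the repository's actual implementation:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

// order is the secp256k1 group order N.
var order, _ = new(big.Int).SetString(
	"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// genPrivKey draws 32 random bytes until the value is a valid scalar,
// i.e. 0 < d < N. Out-of-range candidates are rejected and redrawn,
// which is the behavior described by #3439.
func genPrivKey() ([]byte, error) {
	for {
		b := make([]byte, 32)
		if _, err := rand.Read(b); err != nil {
			return nil, err
		}
		d := new(big.Int).SetBytes(b)
		if d.Sign() > 0 && d.Cmp(order) < 0 {
			return b, nil
		}
		// candidate was zero or >= N (extremely rare): try again
	}
}

func main() {
	k, err := genPrivKey()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", k)
}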
+## v0.30.2
+
+*March 10th, 2019*
+
+This release fixes a CLevelDB memory leak. It was happening because we were not
+closing the WriteBatch object after use. See [levigo's
+godoc](https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close) for the
+Close method. Special thanks goes to @Stumble who both reported an issue in
+[cosmos-sdk](https://github.com/cosmos/cosmos-sdk/issues/3842) and provided a
+fix here.
+
+### BREAKING CHANGES:
+
+* Go API
+  - [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble)
+
+### BUG FIXES:
+- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Fix CLevelDB memory leak (@Stumble)
+
 ## v0.30.1

 *February 20th, 2019*
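The v0.30.2 leak came from C-allocated WriteBatch objects that were never freed; with the Close() method added to the Batch interface, the fix is to always close a batch when done. A minimal sketch against the levigo API linked above (database setup omitted):

package db

import "github.com/jmhodges/levigo"

func writeBatch(db *levigo.DB) error {
	wb := levigo.NewWriteBatch()
	// Close releases the C-allocated batch; forgetting this is exactly
	// the leak fixed in #3842.
	defer wb.Close()

	wb.Put([]byte("key"), []byte("value"))
	wb.Delete([]byte("stale-key"))

	wo := levigo.NewWriteOptions()
	defer wo.Close()
	return db.Write(wo, wb)
}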
CHANGELOG_PENDING.md

@@ -7,28 +7,19 @@ Special thanks to external contributors on this release:
 ### BREAKING CHANGES:

 * CLI/RPC/Config
-- [httpclient] Update Subscribe interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)

 * Apps

 * Go API
-- [libs/common] TrapSignal accepts logger as a first parameter and does not block anymore
-  * previously it was dumping "captured ..." msg to os.Stdout
-  * TrapSignal should not be responsible for blocking thread of execution

 * Blockchain Protocol

 * P2P Protocol

 ### FEATURES:
-- [mempool] \#3079 bound mempool memory usage (`mempool.max_txs_bytes` is set to 1GB by default; see config.toml)
-  mempool's current `txs_total_bytes` is exposed via `total_bytes` field in
-  `/num_unconfirmed_txs` and `/unconfirmed_txs` RPC endpoints.

 ### IMPROVEMENTS:
 - [libs/common] \#3238 exit with zero (0) code upon receiving SIGTERM/SIGINT
+- [CircleCI] \#3497 Move release management to CircleCI

 ### BUG FIXES:

-- [p2p/conn] \#3347 Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
 - [libs/pubsub] \#951, \#1880 use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
DOCKER/Dockerfile

@@ -1,5 +1,5 @@
-FROM alpine:3.7
-MAINTAINER Greg Szabo <greg@tendermint.com>
+FROM alpine:3.9
+LABEL maintainer="hello@tendermint.com"

 # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
 # (unless you change `genesis_file` in config.toml). You can put your config.toml and
Makefile (11 changed lines)
@@ -6,6 +6,7 @@ GOTOOLS = \
 	github.com/square/certstrap
 GOBIN?=${GOPATH}/bin
 PACKAGES=$(shell go list ./...)
+OUTPUT?=build/tendermint

 INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
 BUILD_TAGS?='tendermint'
@@ -19,13 +20,13 @@ check: check_tools get_vendor_deps
 ### Build Tendermint

 build:
-	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/

 build_c:
-	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/

 build_race:
-	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
+	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint

 install:
 	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
@@ -109,7 +110,7 @@ draw_deps:

 get_deps_bin_size:
 	@# Copy of build recipe with additional flags to perform binary size analysis
-	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
+	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
 	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
 	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"

@@ -261,7 +262,7 @@ check_dep:
 ### Docker image

 build-docker:
-	cp build/tendermint DOCKER/tendermint
+	cp $(OUTPUT) DOCKER/tendermint
 	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
 	rm -rf DOCKER/tendermint
Vagrantfile (vendored, 4 changed lines)
@@ -34,10 +34,6 @@ Vagrant.configure("2") do |config|
     mv go /usr/local
     rm -f go1.11.linux-amd64.tar.gz

-    # install nodejs (for docs)
-    curl -sL https://deb.nodesource.com/setup_11.x | bash -
-    apt-get install -y nodejs
-
     # cleanup
     apt-get autoremove -y
abci/cmd/abci-cli/abci-cli.go

@@ -636,7 +636,9 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
 }

 func cmdCounter(cmd *cobra.Command, args []string) error {
+
 	app := counter.NewCounterApplication(flagSerial)
+
 	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

 	// Start the listener
@@ -649,14 +651,12 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	// Stop upon receiving SIGTERM or CTRL-C.
-	cmn.TrapSignal(logger, func() {
+	// Wait forever
+	cmn.TrapSignal(func() {
 		// Cleanup
 		srv.Stop()
 	})
-
-	// Run forever.
-	select {}
+	return nil
 }

 func cmdKVStore(cmd *cobra.Command, args []string) error {
@@ -681,14 +681,12 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
 		return err
 	}

-	// Stop upon receiving SIGTERM or CTRL-C.
-	cmn.TrapSignal(logger, func() {
+	// Wait forever
+	cmn.TrapSignal(func() {
 		// Cleanup
 		srv.Stop()
 	})
-
-	// Run forever.
-	select {}
+	return nil
 }

 //--------------------------------------------------------------------------------
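Both hunks swap the v0.31-style cmn.TrapSignal(logger, cb), which returns immediately (hence the explicit select {} it required), for the older blocking variant. A sketch of roughly what the blocking form did — an assumption about the pre-0.31 libs/common behavior, not a verbatim copy:

package common

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// TrapSignal (old-style sketch): installs a SIGINT/SIGTERM handler that runs
// cb and exits, then blocks the calling goroutine forever. Because the call
// itself never returns, callers did not need their own select {} — which is
// why the diffs above can drop it and leave an unreachable `return nil`.
func TrapSignal(cb func()) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		for sig := range c {
			fmt.Printf("captured %v, exiting...\n", sig)
			if cb != nil {
				cb() // cleanup, e.g. srv.Stop()
			}
			os.Exit(1)
		}
	}()
	select {} // block forever
}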
cmd/priv_val_server/main.go

@@ -35,7 +35,7 @@ func main() {

 	pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)

-	var dialer privval.SocketDialer
+	var dialer privval.Dialer
 	protocol, address := cmn.ProtocolAndAddress(*addr)
 	switch protocol {
 	case "unix":
@@ -48,20 +48,16 @@ func main() {
 		os.Exit(1)
 	}

-	rs := privval.NewSignerServiceEndpoint(logger, *chainID, pv, dialer)
+	rs := privval.NewRemoteSigner(logger, *chainID, pv, dialer)
 	err := rs.Start()
 	if err != nil {
 		panic(err)
 	}

-	// Stop upon receiving SIGTERM or CTRL-C.
-	cmn.TrapSignal(logger, func() {
+	cmn.TrapSignal(func() {
 		err := rs.Stop()
 		if err != nil {
 			panic(err)
 		}
 	})
-
-	// Run forever.
-	select {}
 }
cmd/tendermint/commands/lite.go

@@ -59,11 +59,6 @@ func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
 }

 func runProxy(cmd *cobra.Command, args []string) error {
-	// Stop upon receiving SIGTERM or CTRL-C.
-	cmn.TrapSignal(logger, func() {
-		// TODO: close up shop
-	})
-
 	nodeAddr, err := ensureAddrHasSchemeOrDefaultToTCP(nodeAddr)
 	if err != nil {
 		return err
@@ -91,6 +86,9 @@ func runProxy(cmd *cobra.Command, args []string) error {
 		return cmn.ErrorWrap(err, "starting proxy")
 	}

-	// Run forever
-	select {}
+	cmn.TrapSignal(func() {
+		// TODO: close up shop
+	})
+
+	return nil
 }
cmd/tendermint/commands/run_node.go

@@ -2,10 +2,12 @@ package commands

 import (
 	"fmt"
+	"os"
+	"os/signal"
+	"syscall"

 	"github.com/spf13/cobra"

-	cmn "github.com/tendermint/tendermint/libs/common"
 	nm "github.com/tendermint/tendermint/node"
 )

@@ -55,19 +57,25 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
 				return fmt.Errorf("Failed to create node: %v", err)
 			}

-			// Stop upon receiving SIGTERM or CTRL-C.
-			cmn.TrapSignal(logger, func() {
-				if n.IsRunning() {
-					n.Stop()
-				}
-			})
+			// Stop upon receiving SIGTERM or CTRL-C
+			c := make(chan os.Signal, 1)
+			signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+			go func() {
+				for sig := range c {
+					logger.Error(fmt.Sprintf("captured %v, exiting...", sig))
+					if n.IsRunning() {
+						n.Stop()
+					}
+					os.Exit(1)
+				}
+			}()

 			if err := n.Start(); err != nil {
 				return fmt.Errorf("Failed to start node: %v", err)
 			}
 			logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

-			// Run forever.
+			// Run forever
 			select {}
 		},
 	}
config/config.go

@@ -530,13 +530,12 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {

 // MempoolConfig defines the configuration options for the Tendermint mempool
 type MempoolConfig struct {
-	RootDir     string `mapstructure:"home"`
-	Recheck     bool   `mapstructure:"recheck"`
-	Broadcast   bool   `mapstructure:"broadcast"`
-	WalPath     string `mapstructure:"wal_dir"`
-	Size        int    `mapstructure:"size"`
-	MaxTxsBytes int64  `mapstructure:"max_txs_bytes"`
-	CacheSize   int    `mapstructure:"cache_size"`
+	RootDir   string `mapstructure:"home"`
+	Recheck   bool   `mapstructure:"recheck"`
+	Broadcast bool   `mapstructure:"broadcast"`
+	WalPath   string `mapstructure:"wal_dir"`
+	Size      int    `mapstructure:"size"`
+	CacheSize int    `mapstructure:"cache_size"`
 }

 // DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@@ -545,11 +544,10 @@ func DefaultMempoolConfig() *MempoolConfig {
 		Recheck:   true,
 		Broadcast: true,
 		WalPath:   "",
-		// Each signature verification takes .5ms, Size reduced until we implement
+		// Each signature verification takes .5ms, size reduced until we implement
 		// ABCI Recheck
-		Size:        5000,
-		MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
-		CacheSize:   10000,
+		Size:      5000,
+		CacheSize: 10000,
 	}
 }

@@ -576,9 +574,6 @@ func (cfg *MempoolConfig) ValidateBasic() error {
 	if cfg.Size < 0 {
 		return errors.New("size can't be negative")
 	}
-	if cfg.MaxTxsBytes < 0 {
-		return errors.New("max_txs_bytes can't be negative")
-	}
 	if cfg.CacheSize < 0 {
 		return errors.New("cache_size can't be negative")
 	}
config/toml.go

@@ -237,15 +237,10 @@ recheck = {{ .Mempool.Recheck }}
 broadcast = {{ .Mempool.Broadcast }}
 wal_dir = "{{ js .Mempool.WalPath }}"

-# Maximum number of transactions in the mempool
+# size of the mempool
 size = {{ .Mempool.Size }}

-# Limit the total size of all txs in the mempool.
-# This only accounts for raw transactions (e.g. given 1MB transactions and
-# max_txs_bytes=5MB, mempool will only accept 5 transactions).
-max_txs_bytes = {{ .Mempool.MaxTxsBytes }}
-
-# Size of the cache (used to filter transactions we saw earlier) in transactions
+# size of the cache (used to filter transactions we saw earlier)
 cache_size = {{ .Mempool.CacheSize }}

 ##### consensus configuration options #####
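The removed max_txs_bytes comment encodes a simple accounting rule: only raw transaction bytes count toward the bound, so 1MB transactions against max_txs_bytes=5MB admit exactly 5 transactions. A toy sketch of that admission check (illustrative only, not the real mempool code):

package main

import "fmt"

// byteBoundedPool admits a tx only while the running byte total stays
// within maxTxsBytes -- the bound that `mempool.max_txs_bytes` configured.
type byteBoundedPool struct {
	maxTxsBytes int64
	totalBytes  int64
	txs         [][]byte
}

func (p *byteBoundedPool) add(tx []byte) bool {
	if p.totalBytes+int64(len(tx)) > p.maxTxsBytes {
		return false // would exceed the bound; reject
	}
	p.txs = append(p.txs, tx)
	p.totalBytes += int64(len(tx))
	return true
}

func main() {
	p := &byteBoundedPool{maxTxsBytes: 5 * 1024 * 1024} // 5MB
	tx := make([]byte, 1024*1024)                       // 1MB transaction
	admitted := 0
	for i := 0; i < 10; i++ {
		if p.add(tx) {
			admitted++
		}
	}
	fmt.Println(admitted) // 5 -- matching the comment's example
}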
consensus/byzantine_test.go

@@ -46,7 +46,7 @@ func TestByzantine(t *testing.T) {
 		switches[i].SetLogger(p2pLogger.With("validator", i))
 	}

-	blocksSubs := make([]types.Subscription, N)
+	eventChans := make([]chan interface{}, N)
 	reactors := make([]p2p.Reactor, N)
 	for i := 0; i < N; i++ {
 		// make first val byzantine
@@ -65,8 +65,8 @@ func TestByzantine(t *testing.T) {
 		eventBus := css[i].eventBus
 		eventBus.SetLogger(logger.With("module", "events", "validator", i))

-		var err error
-		blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
+		eventChans[i] = make(chan interface{}, 1)
+		err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
 		require.NoError(t, err)

 		conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
@@ -131,7 +131,7 @@ func TestByzantine(t *testing.T) {
 	p2p.Connect2Switches(switches, ind1, ind2)

 	// wait for someone in the big partition (B) to make a block
-	<-blocksSubs[ind2].Out()
+	<-eventChans[ind2]

 	t.Log("A block has been committed. Healing partition")
 	p2p.Connect2Switches(switches, ind0, ind1)
@@ -143,7 +143,7 @@ func TestByzantine(t *testing.T) {
 	wg.Add(2)
 	for i := 1; i < N-1; i++ {
 		go func(j int) {
-			<-blocksSubs[j].Out()
+			<-eventChans[j]
 			wg.Done()
 		}(i)
 	}
consensus/common_test.go

@@ -7,6 +7,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"reflect"
 	"sort"
 	"sync"
 	"testing"
@@ -24,7 +25,6 @@ import (
 	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
-	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
@@ -127,18 +127,20 @@ func startTestRound(cs *ConsensusState, height int64, round int) {
 	cs.startRoutines(0)
 }

-// Create proposal block from cs1 but sign it with vs.
+// Create proposal block from cs1 but sign it with vs
 func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
 	cs1.mtx.Lock()
 	block, blockParts := cs1.createProposalBlock()
-	validRound := cs1.ValidRound
-	chainID := cs1.state.ChainID
 	cs1.mtx.Unlock()
-	if block == nil {
-		panic("Failed to createProposalBlock. Did you forget to add commit for previous block?")
+	if block == nil { // on error
+		panic("error creating proposal block")
 	}

+	// Make proposal
+	cs1.mtx.RLock()
+	validRound := cs1.ValidRound
+	chainID := cs1.state.ChainID
+	cs1.mtx.RUnlock()
 	polRound, propBlockID := validRound, types.BlockID{block.Hash(), blockParts.Header()}
 	proposal = types.NewProposal(height, round, polRound, propBlockID)
 	if err := vs.SignProposal(chainID, proposal); err != nil {
@@ -227,22 +229,24 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
 	cs.mtx.Unlock()
 }

-func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
-	votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote)
+// genesis
+func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
+	voteCh0 := make(chan interface{})
+	err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0)
 	if err != nil {
 		panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
 	}
-	ch := make(chan tmpubsub.Message)
+	voteCh := make(chan interface{})
 	go func() {
-		for msg := range votesSub.Out() {
-			vote := msg.Data().(types.EventDataVote)
+		for v := range voteCh0 {
+			vote := v.(types.EventDataVote)
 			// we only fire for our own votes
 			if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
-				ch <- msg
+				voteCh <- v
 			}
 		}
 	}()
-	return ch
+	return voteCh
 }

 //-------------------------------------------------------------------------------
@@ -319,7 +323,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {

 //-------------------------------------------------------------------------------

-func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
+func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration,
 	errorMessage string) {
 	select {
 	case <-time.After(timeout):
@@ -329,28 +333,28 @@ func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration,
 	}
 }

-func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) {
+func ensureNoNewEventOnChannel(ch <-chan interface{}) {
 	ensureNoNewEvent(
 		ch,
 		ensureTimeout,
 		"We should be stuck waiting, not receiving new event on the channel")
 }

-func ensureNoNewRoundStep(stepCh <-chan tmpubsub.Message) {
+func ensureNoNewRoundStep(stepCh <-chan interface{}) {
 	ensureNoNewEvent(
 		stepCh,
 		ensureTimeout,
 		"We should be stuck waiting, not receiving NewRoundStep event")
 }

-func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
+func ensureNoNewUnlock(unlockCh <-chan interface{}) {
 	ensureNoNewEvent(
 		unlockCh,
 		ensureTimeout,
 		"We should be stuck waiting, not receiving Unlock event")
 }

-func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
+func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) {
 	timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
 	ensureNoNewEvent(
 		stepCh,
@@ -358,157 +362,142 @@ func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
 		"We should be stuck waiting, not receiving NewTimeout event")
 }

-func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int, timeout time.Duration, errorMessage string) {
+func ensureNewEvent(
+	ch <-chan interface{},
+	height int64,
+	round int,
+	timeout time.Duration,
+	errorMessage string) {
+
 	select {
 	case <-time.After(timeout):
 		panic(errorMessage)
-	case msg := <-ch:
-		roundStateEvent, ok := msg.Data().(types.EventDataRoundState)
+	case ev := <-ch:
+		rs, ok := ev.(types.EventDataRoundState)
 		if !ok {
-			panic(fmt.Sprintf("expected a EventDataRoundState, got %T. Wrong subscription channel?",
-				msg.Data()))
+			panic(
+				fmt.Sprintf(
+					"expected a EventDataRoundState, got %v.Wrong subscription channel?",
+					reflect.TypeOf(rs)))
 		}
-		if roundStateEvent.Height != height {
-			panic(fmt.Sprintf("expected height %v, got %v", height, roundStateEvent.Height))
+		if rs.Height != height {
+			panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
 		}
-		if roundStateEvent.Round != round {
-			panic(fmt.Sprintf("expected round %v, got %v", round, roundStateEvent.Round))
+		if rs.Round != round {
+			panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
 		}
 		// TODO: We could check also for a step at this point!
 	}
 }

-func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) {
+func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
 	select {
 	case <-time.After(ensureTimeout):
 		panic("Timeout expired while waiting for NewRound event")
-	case msg := <-roundCh:
-		newRoundEvent, ok := msg.Data().(types.EventDataNewRound)
+	case ev := <-roundCh:
+		rs, ok := ev.(types.EventDataNewRound)
 		if !ok {
-			panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?",
-				msg.Data()))
+			panic(
+				fmt.Sprintf(
+					"expected a EventDataNewRound, got %v.Wrong subscription channel?",
+					reflect.TypeOf(rs)))
 		}
-		if newRoundEvent.Height != height {
-			panic(fmt.Sprintf("expected height %v, got %v", height, newRoundEvent.Height))
+		if rs.Height != height {
+			panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
 		}
-		if newRoundEvent.Round != round {
-			panic(fmt.Sprintf("expected round %v, got %v", round, newRoundEvent.Round))
+		if rs.Round != round {
+			panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
 		}
 	}
 }

-func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) {
+func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
 	timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
 	ensureNewEvent(timeoutCh, height, round, timeoutDuration,
 		"Timeout expired while waiting for NewTimeout event")
 }

-func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int) {
+func ensureNewProposal(proposalCh <-chan interface{}, height int64, round int) {
 	select {
 	case <-time.After(ensureTimeout):
 		panic("Timeout expired while waiting for NewProposal event")
-	case msg := <-proposalCh:
-		proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
+	case ev := <-proposalCh:
+		rs, ok := ev.(types.EventDataCompleteProposal)
 		if !ok {
-			panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
-				msg.Data()))
+			panic(
+				fmt.Sprintf(
+					"expected a EventDataCompleteProposal, got %v.Wrong subscription channel?",
+					reflect.TypeOf(rs)))
 		}
-		if proposalEvent.Height != height {
-			panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
+		if rs.Height != height {
+			panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
 		}
-		if proposalEvent.Round != round {
-			panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
+		if rs.Round != round {
+			panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
 		}
 	}
 }

-func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int) {
+func ensureNewValidBlock(validBlockCh <-chan interface{}, height int64, round int) {
 	ensureNewEvent(validBlockCh, height, round, ensureTimeout,
 		"Timeout expired while waiting for NewValidBlock event")
 }

-func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) {
+func ensureNewBlock(blockCh <-chan interface{}, height int64) {
 	select {
 	case <-time.After(ensureTimeout):
 		panic("Timeout expired while waiting for NewBlock event")
-	case msg := <-blockCh:
-		blockEvent, ok := msg.Data().(types.EventDataNewBlock)
+	case ev := <-blockCh:
+		block, ok := ev.(types.EventDataNewBlock)
 		if !ok {
-			panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?",
-				msg.Data()))
+			panic(fmt.Sprintf("expected a *types.EventDataNewBlock, "+
+				"got %v. wrong subscription channel?",
+				reflect.TypeOf(block)))
 		}
-		if blockEvent.Block.Height != height {
-			panic(fmt.Sprintf("expected height %v, got %v", height, blockEvent.Block.Height))
+		if block.Block.Height != height {
+			panic(fmt.Sprintf("expected height %v, got %v", height, block.Block.Height))
 		}
 	}
 }

-func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHash cmn.HexBytes) {
+func ensureNewBlockHeader(blockCh <-chan interface{}, height int64, blockHash cmn.HexBytes) {
 	select {
 	case <-time.After(ensureTimeout):
 		panic("Timeout expired while waiting for NewBlockHeader event")
-	case msg := <-blockCh:
-		blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader)
+	case ev := <-blockCh:
+		blockHeader, ok := ev.(types.EventDataNewBlockHeader)
 		if !ok {
-			panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?",
-				msg.Data()))
+			panic(fmt.Sprintf("expected a *types.EventDataNewBlockHeader, "+
+				"got %v. wrong subscription channel?",
+				reflect.TypeOf(blockHeader)))
 		}
-		if blockHeaderEvent.Header.Height != height {
-			panic(fmt.Sprintf("expected height %v, got %v", height, blockHeaderEvent.Header.Height))
+		if blockHeader.Header.Height != height {
+			panic(fmt.Sprintf("expected height %v, got %v", height, blockHeader.Header.Height))
 		}
-		if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) {
-			panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash()))
+		if !bytes.Equal(blockHeader.Header.Hash(), blockHash) {
+			panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeader.Header.Hash()))
 		}
 	}
 }

-func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int) {
+func ensureNewUnlock(unlockCh <-chan interface{}, height int64, round int) {
 	ensureNewEvent(unlockCh, height, round, ensureTimeout,
 		"Timeout expired while waiting for NewUnlock event")
 }

-func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int, propID types.BlockID) {
-	select {
-	case <-time.After(ensureTimeout):
-		panic("Timeout expired while waiting for NewProposal event")
-	case msg := <-proposalCh:
-		proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
-		if !ok {
-			panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
-				msg.Data()))
-		}
-		if proposalEvent.Height != height {
-			panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height))
-		}
-		if proposalEvent.Round != round {
-			panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round))
-		}
-		if !proposalEvent.BlockID.Equals(propID) {
-			panic("Proposed block does not match expected block")
-		}
-	}
-}
-
-func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int) {
-	ensureVote(voteCh, height, round, types.PrecommitType)
-}
-
-func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int) {
-	ensureVote(voteCh, height, round, types.PrevoteType)
-}
-
-func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int,
+func ensureVote(voteCh <-chan interface{}, height int64, round int,
 	voteType types.SignedMsgType) {
 	select {
 	case <-time.After(ensureTimeout):
 		panic("Timeout expired while waiting for NewVote event")
-	case msg := <-voteCh:
-		voteEvent, ok := msg.Data().(types.EventDataVote)
+	case v := <-voteCh:
+		edv, ok := v.(types.EventDataVote)
 		if !ok {
-			panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?",
-				msg.Data()))
+			panic(fmt.Sprintf("expected a *types.Vote, "+
+				"got %v. wrong subscription channel?",
+				reflect.TypeOf(v)))
 		}
-		vote := voteEvent.Vote
+		vote := edv.Vote
 		if vote.Height != height {
 			panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
 		}
@@ -521,7 +510,39 @@ func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int,
 	}
 }

-func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) {
+func ensureProposal(proposalCh <-chan interface{}, height int64, round int, propId types.BlockID) {
+	select {
+	case <-time.After(ensureTimeout):
+		panic("Timeout expired while waiting for NewProposal event")
+	case ev := <-proposalCh:
+		rs, ok := ev.(types.EventDataCompleteProposal)
+		if !ok {
+			panic(
+				fmt.Sprintf(
+					"expected a EventDataCompleteProposal, got %v.Wrong subscription channel?",
+					reflect.TypeOf(rs)))
+		}
+		if rs.Height != height {
+			panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
+		}
+		if rs.Round != round {
+			panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
+		}
+		if !rs.BlockID.Equals(propId) {
+			panic("Proposed block does not match expected block")
+		}
+	}
+}
+
+func ensurePrecommit(voteCh <-chan interface{}, height int64, round int) {
+	ensureVote(voteCh, height, round, types.PrecommitType)
+}
+
+func ensurePrevote(voteCh <-chan interface{}, height int64, round int) {
+	ensureVote(voteCh, height, round, types.PrevoteType)
+}
+
+func ensureNewEventOnChannel(ch <-chan interface{}) {
 	select {
 	case <-time.After(ensureTimeout):
 		panic("Timeout expired while waiting for new activity on the channel")
consensus/mempool_test.go

@@ -117,9 +117,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
 	for nTxs := 0; nTxs < NTxs; {
 		ticker := time.NewTicker(time.Second * 30)
 		select {
-		case msg := <-newBlockCh:
-			blockEvent := msg.Data().(types.EventDataNewBlock)
-			nTxs += int(blockEvent.Block.Header.NumTxs)
+		case b := <-newBlockCh:
+			evt := b.(types.EventDataNewBlock)
+			nTxs += int(evt.Block.Header.NumTxs)
 		case <-ticker.C:
 			panic("Timed out waiting to commit blocks with transactions")
 		}
consensus/reactor_test.go

@@ -30,13 +30,9 @@ import (
 //----------------------------------------------
 // in-process testnets

-func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
-	[]*ConsensusReactor,
-	[]types.Subscription,
-	[]*types.EventBus,
-) {
+func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*ConsensusReactor, []chan interface{}, []*types.EventBus) {
 	reactors := make([]*ConsensusReactor, N)
-	blocksSubs := make([]types.Subscription, 0)
+	eventChans := make([]chan interface{}, N)
 	eventBuses := make([]*types.EventBus, N)
 	for i := 0; i < N; i++ {
 		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
@@ -48,9 +44,9 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
 		eventBuses[i] = css[i].eventBus
 		reactors[i].SetEventBus(eventBuses[i])

-		blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
+		eventChans[i] = make(chan interface{}, 1)
+		err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
 		require.NoError(t, err)
-		blocksSubs = append(blocksSubs, blocksSub)
 	}
 	// make connected switches and start all reactors
 	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
@@ -67,7 +63,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
 		s := reactors[i].conS.GetState()
 		reactors[i].SwitchToConsensus(s, 0)
 	}
-	return reactors, blocksSubs, eventBuses
+	return reactors, eventChans, eventBuses
 }

 func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuses []*types.EventBus) {
@@ -88,11 +84,11 @@ func TestReactorBasic(t *testing.T) {
 	N := 4
 	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
 	defer cleanup()
-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
 	// wait till everyone makes the first new block
 	timeoutWaitGroup(t, N, func(j int) {
-		<-blocksSubs[j].Out()
+		<-eventChans[j]
 	}, css)
 }

@@ -165,20 +161,20 @@ func TestReactorWithEvidence(t *testing.T) {
 		css[i] = cs
 	}

-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, nValidators)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

 	// wait till everyone makes the first new block with no evidence
 	timeoutWaitGroup(t, nValidators, func(j int) {
-		msg := <-blocksSubs[j].Out()
-		block := msg.Data().(types.EventDataNewBlock).Block
+		blockI := <-eventChans[j]
+		block := blockI.(types.EventDataNewBlock).Block
 		assert.True(t, len(block.Evidence.Evidence) == 0)
 	}, css)

 	// second block should have evidence
 	timeoutWaitGroup(t, nValidators, func(j int) {
-		msg := <-blocksSubs[j].Out()
-		block := msg.Data().(types.EventDataNewBlock).Block
+		blockI := <-eventChans[j]
+		block := blockI.(types.EventDataNewBlock).Block
 		assert.True(t, len(block.Evidence.Evidence) > 0)
 	}, css)
 }
@@ -225,7 +221,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 		c.Consensus.CreateEmptyBlocks = false
 	})
 	defer cleanup()
-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

 	// send a tx
@@ -235,7 +231,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

 	// wait till everyone makes the first new block
 	timeoutWaitGroup(t, N, func(j int) {
-		<-blocksSubs[j].Out()
+		<-eventChans[j]
 	}, css)
 }

@@ -244,12 +240,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 	N := 4
 	css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
 	defer cleanup()
-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

 	// wait till everyone makes the first new block
 	timeoutWaitGroup(t, N, func(j int) {
-		<-blocksSubs[j].Out()
+		<-eventChans[j]
 	}, css)

 	// Get peer
@@ -269,7 +265,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
 	logger := log.TestingLogger()
 	css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
 	defer cleanup()
-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
 	defer stopConsensusNet(logger, reactors, eventBuses)

 	// map of active validators
@@ -281,7 +277,7 @@ func TestReactorVotingPowerChange(t *testing.T) {

 	// wait till everyone makes block 1
 	timeoutWaitGroup(t, nVals, func(j int) {
-		<-blocksSubs[j].Out()
+		<-eventChans[j]
 	}, css)

 	//---------------------------------------------------------------------------
@@ -292,10 +288,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
 	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
 	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
-	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
+	waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)

 	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
 		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
@@ -304,10 +300,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
 	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
 	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
-	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
+	waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)

 	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
 		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
@@ -316,10 +312,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
 	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
 	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
-	waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
-	waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
+	waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
+	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)

 	if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
 		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
@@ -333,7 +329,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	defer cleanup()
 	logger := log.TestingLogger()

-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
 	defer stopConsensusNet(logger, reactors, eventBuses)

 	// map of active validators
@@ -345,7 +341,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {

 	// wait till everyone makes block 1
 	timeoutWaitGroup(t, nPeers, func(j int) {
-		<-blocksSubs[j].Out()
+		<-eventChans[j]
 	}, css)

 	//---------------------------------------------------------------------------
@@ -358,22 +354,22 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	// wait till everyone makes block 2
 	// ensure the commit includes all validators
 	// send newValTx to change vals in block 3
-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1)

 	// wait till everyone makes block 3.
 	// it includes the commit for block 2, which is by the original validator set
-	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
+	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx1)

 	// wait till everyone makes block 4.
 	// it includes the commit for block 3, which is by the original validator set
-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)

 	// the commits for block 4 should be with the updated validator set
 	activeVals[string(newValidatorPubKey1.Address())] = struct{}{}

 	// wait till everyone makes block 5
 	// it includes the commit for block 4, which should have the updated validator set
-	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
+	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)

 	//---------------------------------------------------------------------------
 	logger.Info("---------------------------- Testing changing the voting power of one validator")
@@ -383,10 +379,10 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
 	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
-	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
-	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
+	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
+	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)

 	if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
 		t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
@@ -403,12 +399,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
 	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
-	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
+	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
 	activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
 	activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
-	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
+	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)

 	//---------------------------------------------------------------------------
 	logger.Info("---------------------------- Testing removing two validators at once")
@@ -416,12 +412,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
 	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
-	waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
-	waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
+	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
+	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
 	delete(activeVals, string(newValidatorPubKey2.Address()))
 	delete(activeVals, string(newValidatorPubKey3.Address()))
-	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
+	waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
 }

 // Check we can make blocks with skip_timeout_commit=false
@@ -434,27 +430,23 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
 		css[i].config.SkipTimeoutCommit = false
 	}

-	reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N-1)
 	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)

 	// wait till everyone makes the first new block
 	timeoutWaitGroup(t, N-1, func(j int) {
-		<-blocksSubs[j].Out()
+		<-eventChans[j]
 	}, css)
 }

-func waitForAndValidateBlock(
-	t *testing.T,
-	n int,
-	activeVals map[string]struct{},
-	blocksSubs []types.Subscription,
-	css []*ConsensusState,
-	txs ...[]byte,
-) {
+func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
 	timeoutWaitGroup(t, n, func(j int) {
 		css[j].Logger.Debug("waitForAndValidateBlock")
-		msg := <-blocksSubs[j].Out()
-		newBlock := msg.Data().(types.EventDataNewBlock).Block
+		newBlockI, ok := <-eventChans[j]
+		if !ok {
+			return
+		}
+		newBlock := newBlockI.(types.EventDataNewBlock).Block
 		css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
 		err := validateBlock(newBlock, activeVals)
 		assert.Nil(t, err)
@@ -465,21 +457,17 @@ func waitForAndValidateBlock(
 	}, css)
 }

-func waitForAndValidateBlockWithTx(
-	t *testing.T,
-	n int,
-	activeVals map[string]struct{},
-	blocksSubs []types.Subscription,
-	css []*ConsensusState,
-	txs ...[]byte,
-) {
+func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
 	timeoutWaitGroup(t, n, func(j int) {
 		ntxs := 0
 	BLOCK_TX_LOOP:
 		for {
 			css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
-			msg := <-blocksSubs[j].Out()
-			newBlock := msg.Data().(types.EventDataNewBlock).Block
+			newBlockI, ok := <-eventChans[j]
+			if !ok {
+				return
+			}
+			newBlock := newBlockI.(types.EventDataNewBlock).Block
 			css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
 			err := validateBlock(newBlock, activeVals)
 			assert.Nil(t, err)
@@ -500,21 +488,18 @@ func waitForAndValidateBlockWithTx(
 	}, css)
 }

-func waitForBlockWithUpdatedValsAndValidateIt(
-	t *testing.T,
-	n int,
-	updatedVals map[string]struct{},
-	blocksSubs []types.Subscription,
-	css []*ConsensusState,
-) {
+func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) {
 	timeoutWaitGroup(t, n, func(j int) {

 		var newBlock *types.Block
 	LOOP:
 		for {
 			css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
-			msg := <-blocksSubs[j].Out()
-			newBlock = msg.Data().(types.EventDataNewBlock).Block
+			newBlockI, ok := <-eventChans[j]
+			if !ok {
+				return
+			}
+			newBlock = newBlockI.(types.EventDataNewBlock).Block
 			if newBlock.LastCommit.Size() == len(updatedVals) {
 				css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
 				break LOOP
@@ -42,7 +42,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli)
// Unmarshal and apply a single message to the consensus state as if it were
// received in receiveRoutine. Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running.
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error {
// Skip meta messages which exist for demarcating boundaries.
if _, ok := msg.Msg.(EndHeightMessage); ok {
return nil
@@ -54,17 +54,15 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepSub typ
cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
// these are playback checks
ticker := time.After(time.Second * 2)
if newStepSub != nil {
if newStepCh != nil {
select {
case stepMsg := <-newStepSub.Out():
m2 := stepMsg.Data().(types.EventDataRoundState)
case mi := <-newStepCh:
m2 := mi.(types.EventDataRoundState)
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("RoundState mismatch. Got %v; Expected %v", m2, m)
}
case <-newStepSub.Cancelled():
return fmt.Errorf("Failed to read off newStepSub.Out(). newStepSub was cancelled")
case <-ticker:
return fmt.Errorf("Failed to read off newStepSub.Out()")
return fmt.Errorf("Failed to read off newStepCh")
}
}
case msgInfo:
@@ -51,13 +51,25 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
cs.startForReplay()

// ensure all new step events are regenerated as expected
newStepCh := make(chan interface{}, 1)

ctx := context.Background()
newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep)
err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
if err != nil {
return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
}
defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
defer func() {
// drain newStepCh to make sure we don't block
LOOP:
for {
select {
case <-newStepCh:
default:
break LOOP
}
}
cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
}()

// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
@@ -82,7 +94,7 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
return err
}

if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil {
if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
return err
}

@@ -119,7 +131,7 @@ func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.S
}

// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepSub types.Subscription) error {
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.cs.Stop()
pb.cs.Wait()

@@ -149,7 +161,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error
} else if err != nil {
return err
}
if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil {
if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
return err
}
pb.count++
@@ -213,15 +225,27 @@ func (pb *playback) replayConsoleLoop() int {

ctx := context.Background()
// ensure all new step events are regenerated as expected
newStepCh := make(chan interface{}, 1)

newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep)
err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
if err != nil {
cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
}
defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
defer func() {
// drain newStepCh to make sure we don't block
LOOP:
for {
select {
case <-newStepCh:
default:
break LOOP
}
}
pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
}()

if len(tokens) == 1 {
if err := pb.replayReset(1, newStepSub); err != nil {
if err := pb.replayReset(1, newStepCh); err != nil {
pb.cs.Logger.Error("Replay reset error", "err", err)
}
} else {
@@ -231,7 +255,7 @@ func (pb *playback) replayConsoleLoop() int {
} else if i > pb.count {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else {
if err := pb.replayReset(i, newStepSub); err != nil {
if err := pb.replayReset(i, newStepCh); err != nil {
pb.cs.Logger.Error("Replay reset error", "err", err)
}
}
@@ -19,6 +19,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
auto "github.com/tendermint/tendermint/libs/autofile"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
@@ -77,14 +78,13 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *
// in the WAL itself. Assuming the consensus state is running, replay of any
// WAL, including the empty one, should eventually be followed by a new
// block, or else something is wrong.
newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
newBlockCh := make(chan interface{}, 1)
err = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh)
require.NoError(t, err)
select {
case <-newBlockSub.Out():
case <-newBlockSub.Cancelled():
t.Fatal("newBlockSub was cancelled")
case <-time.After(120 * time.Second):
t.Fatal("Timed out waiting for new block (see trace above)")
case <-newBlockCh:
case <-time.After(60 * time.Second):
t.Fatalf("Timed out waiting for new block (see trace above)")
}
}

@@ -128,8 +128,8 @@ func TestWALCrash(t *testing.T) {

func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
walPaniced := make(chan error)
crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}

i := 1
LOOP:
@@ -168,8 +168,8 @@ LOOP:
i++

select {
case err := <-walPanicked:
t.Logf("WAL panicked: %v", err)
case err := <-walPaniced:
t.Logf("WAL paniced: %v", err)

// make sure we can make blocks after a crash
startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
@@ -190,18 +190,16 @@ LOOP:

// crashingWAL is a WAL which crashes or rather simulates a crash during Save
// (before and after). It remembers a message for which we last panicked
// (lastPanickedForMsgIndex), so we don't panic for it in subsequent iterations.
// (lastPanicedForMsgIndex), so we don't panic for it in subsequent iterations.
type crashingWAL struct {
next WAL
panicCh chan error
heightToStop int64

msgIndex int // current message index
lastPanickedForMsgIndex int // last message for which we panicked
msgIndex int // current message index
lastPanicedForMsgIndex int // last message for which we panicked
}

var _ WAL = &crashingWAL{}

// WALWriteError indicates a WAL crash.
type WALWriteError struct {
msg string
@@ -234,8 +232,8 @@ func (w *crashingWAL) Write(m WALMessage) {
return
}

if w.msgIndex > w.lastPanickedForMsgIndex {
w.lastPanickedForMsgIndex = w.msgIndex
if w.msgIndex > w.lastPanicedForMsgIndex {
w.lastPanicedForMsgIndex = w.msgIndex
_, file, line, _ := runtime.Caller(1)
w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
runtime.Goexit()
@@ -249,9 +247,8 @@ func (w *crashingWAL) WriteSync(m WALMessage) {
w.Write(m)
}

func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }

func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
func (w *crashingWAL) Group() *auto.Group { return w.next.Group() }
func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
return w.next.SearchForEndHeight(height, options)
}
@@ -909,9 +909,6 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
}
}

// Flush the WAL. Otherwise, we may not recompute the same proposal to sign, and the privValidator will refuse to sign anything.
cs.wal.FlushAndSync()

// Make proposal
propBlockId := types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId)
@@ -1677,9 +1674,6 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
}

func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
// Flush the WAL. Otherwise, we may not recompute the same vote to sign, and the privValidator will refuse to sign anything.
cs.wal.FlushAndSync()

addr := cs.privValidator.GetPubKey().Address()
valIndex, _ := cs.Validators.GetByAddress(addr)
@@ -1620,10 +1620,11 @@ func TestStateOutputVoteStats(t *testing.T) {
}

// subscribe subscribes test client to the given query and returns a channel with cap = 1.
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q)
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} {
out := make(chan interface{}, 1)
err := eventBus.Subscribe(context.Background(), testSubscriber, q, out)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
}
return sub.Out()
return out
}
@@ -21,9 +21,6 @@ import (
const (
// must be greater than types.BlockPartSizeBytes + a few bytes
maxMsgSizeBytes = 1024 * 1024 // 1MB

// how often the WAL should be sync'd during period sync'ing
walDefaultFlushInterval = 2 * time.Second
)

//--------------------------------------------------------
@@ -57,36 +54,26 @@ func RegisterWALMessages(cdc *amino.Codec) {
type WAL interface {
Write(WALMessage)
WriteSync(WALMessage)
FlushAndSync() error
Group() *auto.Group
SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error)

SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error)

// service methods
Start() error
Stop() error
Wait()
}

// Write ahead logger writes msgs to disk before they are processed.
// Can be used for crash-recovery and deterministic replay.
// TODO: currently the wal is overwritten during replay catchup, give it a mode
// so it's either reading or appending - must read to end to start appending
// again.
// Can be used for crash-recovery and deterministic replay
// TODO: currently the wal is overwritten during replay catchup
// give it a mode so it's either reading or appending - must read to end to start appending again
type baseWAL struct {
cmn.BaseService

group *auto.Group

enc *WALEncoder

flushTicker *time.Ticker
flushInterval time.Duration
}

var _ WAL = &baseWAL{}

// NewWAL returns a new write-ahead logger based on `baseWAL`, which implements
// WAL. It's flushed and synced to disk every 2s and once when stopped.
func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*baseWAL, error) {
err := cmn.EnsureDir(filepath.Dir(walFile), 0700)
if err != nil {
@@ -98,19 +85,13 @@ func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*baseWAL, error)
return nil, err
}
wal := &baseWAL{
group: group,
enc: NewWALEncoder(group),
flushInterval: walDefaultFlushInterval,
group: group,
enc: NewWALEncoder(group),
}
wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal)
return wal, nil
}

// SetFlushInterval allows us to override the periodic flush interval for the WAL.
func (wal *baseWAL) SetFlushInterval(i time.Duration) {
wal.flushInterval = i
}

func (wal *baseWAL) Group() *auto.Group {
return wal.group
}
@@ -128,39 +109,14 @@ func (wal *baseWAL) OnStart() error {
wal.WriteSync(EndHeightMessage{0})
}
err = wal.group.Start()
if err != nil {
return err
}
wal.flushTicker = time.NewTicker(wal.flushInterval)
go wal.processFlushTicks()
return nil
}

func (wal *baseWAL) processFlushTicks() {
for {
select {
case <-wal.flushTicker.C:
if err := wal.FlushAndSync(); err != nil {
wal.Logger.Error("Periodic WAL flush failed", "err", err)
}
case <-wal.Quit():
return
}
}
}

// FlushAndSync flushes and fsync's the underlying group's data to disk.
// See auto#FlushAndSync
func (wal *baseWAL) FlushAndSync() error {
return wal.group.FlushAndSync()
return err
}

// Stop the underlying autofile group.
// Use Wait() to ensure it's finished shutting down
// before cleaning up files.
func (wal *baseWAL) OnStop() {
wal.flushTicker.Stop()
wal.FlushAndSync()
wal.group.Flush()
wal.group.Stop()
wal.group.Close()
}
@@ -194,7 +150,7 @@ func (wal *baseWAL) WriteSync(msg WALMessage) {
}

wal.Write(msg)
if err := wal.FlushAndSync(); err != nil {
if err := wal.group.Flush(); err != nil {
panic(fmt.Sprintf("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}
@@ -210,11 +166,8 @@ type WALSearchOptions struct {
// Group reader will be nil if found equals false.
//
// CONTRACT: caller must close group reader.
func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
var (
msg *TimedWALMessage
gr *auto.GroupReader
)
func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
var msg *TimedWALMessage
lastHeightFound := int64(-1)

// NOTE: starting from the last file in the group because we're usually
@@ -381,12 +334,10 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {

type nilWAL struct{}

var _ WAL = nilWAL{}

func (nilWAL) Write(m WALMessage) {}
func (nilWAL) WriteSync(m WALMessage) {}
func (nilWAL) FlushAndSync() error { return nil }
func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
func (nilWAL) Group() *auto.Group { return nil }
func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
return nil, false, nil
}
func (nilWAL) Start() error { return nil }
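For orientation, here is a minimal sketch (editorial, not part of this diff) of driving the pre-revert WAL API shown above — `NewWAL`, `SetFlushInterval`, and an explicit `FlushAndSync`; the file path and interval are illustrative:

```
package main

import (
	"time"

	"github.com/tendermint/tendermint/consensus"
)

func main() {
	// Open (or create) a write-ahead log backed by an autofile group.
	wal, err := consensus.NewWAL("/tmp/cs.wal/wal")
	if err != nil {
		panic(err)
	}
	// Pre-revert branch only: shorten the default 2s periodic flush.
	wal.SetFlushInterval(500 * time.Millisecond)
	if err := wal.Start(); err != nil {
		panic(err)
	}
	defer func() {
		wal.Stop()
		wal.Wait() // let the flush goroutine finish before cleaning up files
	}()

	wal.Write(consensus.EndHeightMessage{Height: 1}) // buffered write
	if err := wal.FlushAndSync(); err != nil {       // fsync to disk
		panic(err)
	}
}
```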
@@ -5,6 +5,7 @@ import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"testing"
"time"
@@ -13,6 +14,7 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
@@ -22,12 +24,13 @@ import (
"github.com/tendermint/tendermint/types"
)

// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
// WALGenerateNBlocks generates a consensus WAL. It does this by spining up a
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
config := getConfig(t)
defer os.RemoveAll(config.RootDir)

app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))

@@ -191,9 +194,10 @@ func (w *byteBufferWAL) WriteSync(m WALMessage) {
w.Write(m)
}

func (w *byteBufferWAL) FlushAndSync() error { return nil }

func (w *byteBufferWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
func (w *byteBufferWAL) Group() *auto.Group {
panic("not implemented")
}
func (w *byteBufferWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
return nil, false, nil
}
@@ -3,6 +3,7 @@ package consensus
import (
"bytes"
"crypto/rand"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@@ -21,10 +22,6 @@ import (
"github.com/stretchr/testify/require"
)

const (
walTestFlushInterval = time.Duration(100) * time.Millisecond
)

func TestWALTruncate(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
@@ -32,10 +29,8 @@ func TestWALTruncate(t *testing.T) {

walFile := filepath.Join(walDir, "wal")

// this magic number 4K can truncate the content when RotateFile.
// defaultHeadSizeLimit(10M) is hard to simulate.
// this magic number 1 * time.Millisecond make RotateFile check frequently.
// defaultGroupCheckDuration(5s) is hard to simulate.
//this magic number 4K can truncate the content when RotateFile. defaultHeadSizeLimit(10M) is hard to simulate.
//this magic number 1 * time.Millisecond make RotateFile check frequently. defaultGroupCheckDuration(5s) is hard to simulate.
wal, err := NewWAL(walFile,
autofile.GroupHeadSizeLimit(4096),
autofile.GroupCheckDuration(1*time.Millisecond),
@@ -51,21 +46,20 @@ func TestWALTruncate(t *testing.T) {
wal.Wait()
}()

// 60 block's size nearly 70K, greater than group's headBuf size(4096 * 10),
// when headBuf is full, truncate content will Flush to the file. at this
// time, RotateFile is called, truncate content exist in each file.
//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
//at this time, RotateFile is called, truncate content exist in each file.
err = WALGenerateNBlocks(t, wal.Group(), 60)
require.NoError(t, err)

time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run

wal.FlushAndSync()
wal.Group().Flush()

h := int64(50)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
assert.NoError(t, err, "expected not to err on height %d", h)
assert.True(t, found, "expected to find end height for %d", h)
assert.NotNil(t, gr)
assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h))
assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h))
assert.NotNil(t, gr, "expected group not to be nil")
defer gr.Close()

dec := NewWALDecoder(gr)
@@ -73,7 +67,7 @@ func TestWALTruncate(t *testing.T) {
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, "wrong height")
assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height"))
}

func TestWALEncoderDecoder(t *testing.T) {
@@ -134,9 +128,9 @@ func TestWALSearchForEndHeight(t *testing.T) {

h := int64(3)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
assert.NoError(t, err, "expected not to err on height %d", h)
assert.True(t, found, "expected to find end height for %d", h)
assert.NotNil(t, gr)
assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h))
assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h))
assert.NotNil(t, gr, "expected group not to be nil")
defer gr.Close()

dec := NewWALDecoder(gr)
@@ -144,47 +138,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, "wrong height")
}

func TestWALPeriodicSync(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
require.NoError(t, err)
defer os.RemoveAll(walDir)

walFile := filepath.Join(walDir, "wal")
wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond))
require.NoError(t, err)

wal.SetFlushInterval(walTestFlushInterval)
wal.SetLogger(log.TestingLogger())

// Generate some data
err = WALGenerateNBlocks(t, wal.Group(), 5)
require.NoError(t, err)

// We should have data in the buffer now
assert.NotZero(t, wal.Group().Buffered())

require.NoError(t, wal.Start())
defer func() {
wal.Stop()
wal.Wait()
}()

time.Sleep(walTestFlushInterval + (10 * time.Millisecond))

// The data should have been flushed by the periodic sync
assert.Zero(t, wal.Group().Buffered())

h := int64(4)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
assert.NoError(t, err, "expected not to err on height %d", h)
assert.True(t, found, "expected to find end height for %d", h)
assert.NotNil(t, gr)
if gr != nil {
gr.Close()
}
assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height"))
}

/*
@@ -63,8 +63,7 @@ module.exports = {
"/tendermint-core/light-client-protocol",
"/tendermint-core/metrics",
"/tendermint-core/secure-p2p",
"/tendermint-core/validators",
"/tendermint-core/mempool"
"/tendermint-core/validators"
]
},
{
@@ -66,12 +66,11 @@ To build and serve the documentation locally, run:

```
# from this directory
npm install
npm install -g vuepress
```

NOTE: the command may require `sudo`.

then change the following line in the `.vuepress/config.js`:
then change the following line in the `config.js`:

```
base: "/docs/",
@@ -89,14 +89,12 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
return err
}

// Stop upon receiving SIGTERM or CTRL-C.
cmn.TrapSignal(logger, func() {
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})

// Run forever.
select {}
return nil
}
```

@@ -240,14 +238,12 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
return err
}

// Stop upon receiving SIGTERM or CTRL-C.
cmn.TrapSignal(logger, func() {
// Wait forever
cmn.TrapSignal(func() {
// Cleanup
srv.Stop()
})

// Run forever.
select {}
return nil
}
```
@@ -40,4 +40,4 @@ However, we only store valid txs in the cache, not invalid ones.
This is because invalid txs could become good later.
Txs that are included in a block aren't removed from the cache,
as they still may be getting received over the p2p network.
These txs are stored in the cache by their hash, to mitigate memory concerns.
These txs are stored in the cache by their hash, to mitigate memory concerns.
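The hunk above preserves the note that cached txs are keyed by hash. As a hedged illustration of that idea (not the mempool's actual code), a bounded FIFO cache over 32-byte hashes looks roughly like this:

```
package main

import (
	"container/list"
	"crypto/sha256"
)

type txCache struct {
	size  int
	seen  map[[sha256.Size]byte]struct{}
	queue *list.List // FIFO of hashes, for eviction
}

func newTxCache(size int) *txCache {
	return &txCache{size: size, seen: make(map[[sha256.Size]byte]struct{}), queue: list.New()}
}

// Push records tx and reports whether this is the first time it was seen.
// Storing only the 32-byte hash keeps memory bounded regardless of tx size.
func (c *txCache) Push(tx []byte) (firstTime bool) {
	h := sha256.Sum256(tx)
	if _, ok := c.seen[h]; ok {
		return false
	}
	if c.queue.Len() >= c.size { // evict the oldest entry
		oldest := c.queue.Remove(c.queue.Front()).([sha256.Size]byte)
		delete(c.seen, oldest)
	}
	c.seen[h] = struct{}{}
	c.queue.PushBack(h)
	return true
}
```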
@@ -183,15 +183,10 @@ recheck = true
broadcast = true
wal_dir = ""

# Maximum number of transactions in the mempool
# size of the mempool
size = 5000

# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824

# Size of the cache (used to filter transactions we saw earlier) in transactions
# size of the cache (used to filter transactions we saw earlier)
cache_size = 10000

##### consensus configuration options #####
@@ -268,74 +263,3 @@ max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"
```
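To make the `size`/`max_txs_bytes` interaction concrete (e.g. with 1MB transactions and `max_txs_bytes=5MB`, only 5 txs fit even though `size = 5000`), here is a toy admission check — illustrative only, not Tendermint source:

```
// canAddTx reports whether one more tx fits under both the count cap
// (`size`) and the byte cap (`max_txs_bytes`).
func canAddTx(curCount, maxCount, curBytes, maxBytes, txBytes int64) bool {
	return curCount+1 <= maxCount && curBytes+txBytes <= maxBytes
}
```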

## Empty blocks VS no empty blocks

**create_empty_blocks = true**

If `create_empty_blocks` is set to `true` in your config, blocks will be
created ~ every second (with default consensus parameters). You can regulate
the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit
= "10s"` should result in ~ 10 second blocks.

**create_empty_blocks = false**

In this setting, blocks are created when transactions are received.

Note that after block H, Tendermint creates something we call a "proof block"
at H+1 (only if the application hash changed). The reason for this is to support
proofs. If you have a transaction in block H that changes the state to X, the
new application hash will only be included in block H+1. If after your
transaction is committed, you want to get a lite-client proof for the new state
(X), you need the new block to be committed in order to do that because the new
block has the new application hash for the state X. That's why we make a new
(empty) block if the application hash changes. Otherwise, you won't be able to
make a proof for the new state.

Plus, if you set `create_empty_blocks_interval` to something other than the
default (`0`), Tendermint will be creating empty blocks even in the absence of
transactions every `create_empty_blocks_interval`. For instance, with
`create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`,
Tendermint will only create blocks if there are transactions, or after waiting
30 seconds without receiving any transactions.
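A short sketch of setting both knobs programmatically; the `Consensus.CreateEmptyBlocks*` field names are assumed from the `config` package of this release line rather than quoted from it:

```
package main

import (
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()
	// Only produce blocks when there are txs (or an app-hash change)...
	c.Consensus.CreateEmptyBlocks = false
	// ...but still cut an empty block after 30s without transactions.
	c.Consensus.CreateEmptyBlocksInterval = 30 * time.Second
	_ = c // hand c to the node constructor in real code
}
```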

## Consensus timeouts explained

There's a variety of information about timeouts in [Running in
production](./running-in-production.html)

You can also find a more detailed technical explanation in the spec: [The latest
gossip on BFT consensus](https://arxiv.org/abs/1807.04938).

```
[consensus]
...

timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
```

Note that in a successful round, the only timeout we always wait out in full,
no matter what, is `timeout_commit`.

Here's a brief summary of the timeouts:

- `timeout_propose` = how long we wait for a proposal block before prevoting
nil
- `timeout_propose_delta` = how much timeout_propose increases with each round
- `timeout_prevote` = how long we wait after receiving +2/3 prevotes for
anything (ie. not a single block or nil)
- `timeout_prevote_delta` = how much the timeout_prevote increases with each
round
- `timeout_precommit` = how long we wait after receiving +2/3 precommits for
anything (ie. not a single block or nil)
- `timeout_precommit_delta` = how much the timeout_precommit increases with
each round
- `timeout_commit` = how long we wait after committing a block, before starting
on the new height (this gives us a chance to receive some more precommits,
even though we already have +2/3)
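The `*_delta` entries mean each timeout grows linearly with the round number; with the defaults above, the propose timeout is 3s in round 0, 3.5s in round 1, and 4s in round 2. A one-line helper (not from the source) capturing the arithmetic:

```
// timeoutForRound returns base + round*delta, the per-round growth
// described above, e.g. timeoutForRound(3*time.Second,
// 500*time.Millisecond, 2) == 4 * time.Second.
func timeoutForRound(base, delta time.Duration, round int) time.Duration {
	return base + time.Duration(round)*delta
}
```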
@@ -1,41 +0,0 @@
# Mempool

## Transaction ordering

Currently, there's no ordering of transactions other than the order in which
they've arrived (via RPC or from other nodes).

So the only way to specify the order is to send them to a single node.

valA:
- tx1
- tx2
- tx3

If the transactions are split up across different nodes, there's no way to
ensure they are processed in the expected order.

valA:
- tx1
- tx2

valB:
- tx3

If valB is the proposer, the order might be:

- tx3
- tx1
- tx2

If valA is the proposer, the order might be:

- tx1
- tx2
- tx3

That said, if the transactions contain some internal value, like an
order/nonce/sequence number, the application can reject transactions that are
out of order. So if a node receives tx3, then tx1, it can reject tx3 and then
accept tx1. The sender can then retry sending tx3, which should probably be
rejected until the node has seen tx2.
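The nonce rule in the last paragraph is the one actionable piece here, so as an illustrative application-side sketch (not the kvstore app), a check that yields exactly that retry behavior:

```
import "fmt"

// checkTxNonce accepts a tx only if it carries the next expected
// sequence number for its sender, so tx3 arriving before tx2 is
// rejected until the gap is filled.
func checkTxNonce(expectedNext map[string]uint64, sender string, nonce uint64) error {
	if next := expectedNext[sender]; nonce != next {
		return fmt.Errorf("bad nonce for %s: got %d, want %d", sender, nonce, next)
	}
	expectedNext[sender]++
	return nil
}
```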
@@ -29,21 +29,7 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo
return
}

type fmtLogger struct{}

func (fmtLogger) Info(msg string, keyvals ...interface{}) {
strs := make([]string, len(keyvals))
for i, kv := range keyvals {
strs[i] = fmt.Sprintf("%v", kv)
}
fmt.Printf("%s %s\n", msg, strings.Join(strs, ","))
}

func main() {
// Stop upon receiving SIGTERM or CTRL-C.
cmn.TrapSignal(fmtLogger{}, func() {
fmt.Println("logjack shutting down")
})

// Read options
headPath, chopSize, limitSize, version := parseFlags()
@@ -65,22 +51,29 @@ func main() {
os.Exit(1)
}

// Forever read from stdin and write to AutoFile.
buf := make([]byte, readBufferSize)
for {
n, err := os.Stdin.Read(buf)
group.Write(buf[:n])
group.FlushAndSync()
if err != nil {
group.Stop()
if err == io.EOF {
os.Exit(0)
} else {
fmt.Println("logjack errored")
os.Exit(1)
go func() {
// Forever, read from stdin and write to AutoFile.
buf := make([]byte, readBufferSize)
for {
n, err := os.Stdin.Read(buf)
group.Write(buf[:n])
group.Flush()
if err != nil {
group.Stop()
if err == io.EOF {
os.Exit(0)
} else {
fmt.Println("logjack errored")
os.Exit(1)
}
}
}
}
}()

// Trap signal
cmn.TrapSignal(func() {
fmt.Println("logjack shutting down")
})
}

func parseBytesize(chopSize string) int64 {
@@ -131,23 +131,21 @@ func GroupTotalSizeLimit(limit int64) func(*Group) {
}
}

// OnStart implements cmn.Service by starting the goroutine that checks file
// and group limits.
// OnStart implements Service by starting the goroutine that checks file and
// group limits.
func (g *Group) OnStart() error {
g.ticker = time.NewTicker(g.groupCheckDuration)
go g.processTicks()
return nil
}

// OnStop implements cmn.Service by stopping the goroutine described above.
// OnStop implements Service by stopping the goroutine described above.
// NOTE: g.Head must be closed separately using Close.
func (g *Group) OnStop() {
g.ticker.Stop()
g.FlushAndSync()
g.Flush() // flush any uncommitted data
}

// Wait blocks until all internal goroutines are finished. Supposed to be
// called after Stop.
func (g *Group) Wait() {
// wait for processTicks routine to finish
<-g.doneProcessTicks
@@ -155,7 +153,7 @@ func (g *Group) Wait() {

// Close closes the head file. The group must be stopped by this moment.
func (g *Group) Close() {
g.FlushAndSync()
g.Flush() // flush any uncommitted data

g.mtx.Lock()
_ = g.Head.closeFile()
@@ -211,16 +209,9 @@ func (g *Group) WriteLine(line string) error {
return err
}

// Buffered returns the size of the currently buffered data.
func (g *Group) Buffered() int {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.headBuf.Buffered()
}

// FlushAndSync writes any buffered data to the underlying file and commits the
// current content of the file to stable storage (fsync).
func (g *Group) FlushAndSync() error {
// Flush writes any buffered data to the underlying file and commits the
// current content of the file to stable storage.
func (g *Group) Flush() error {
g.mtx.Lock()
defer g.mtx.Unlock()
err := g.headBuf.Flush()
@@ -55,7 +55,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
err := g.WriteLine(cmn.RandStr(999))
require.NoError(t, err, "Error appending to head")
}
g.FlushAndSync()
g.Flush()
assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000)

// Even calling checkHeadSizeLimit manually won't rotate it.
@@ -65,7 +65,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
// Write 1000 more bytes.
err := g.WriteLine(cmn.RandStr(999))
require.NoError(t, err, "Error appending to head")
g.FlushAndSync()
g.Flush()

// Calling checkHeadSizeLimit this time rolls it.
g.checkHeadSizeLimit()
@@ -74,7 +74,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
// Write 1000 more bytes.
err = g.WriteLine(cmn.RandStr(999))
require.NoError(t, err, "Error appending to head")
g.FlushAndSync()
g.Flush()

// Calling checkHeadSizeLimit does nothing.
g.checkHeadSizeLimit()
@@ -85,7 +85,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
err = g.WriteLine(cmn.RandStr(999))
require.NoError(t, err, "Error appending to head")
}
g.FlushAndSync()
g.Flush()
assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000)

// Calling checkHeadSizeLimit rolls it again.
@@ -95,7 +95,7 @@ func TestCheckHeadSizeLimit(t *testing.T) {
// Write 1000 more bytes.
_, err = g.Head.Write([]byte(cmn.RandStr(999) + "\n"))
require.NoError(t, err, "Error appending to head")
g.FlushAndSync()
g.Flush()
assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000)

// Calling checkHeadSizeLimit does nothing.
@@ -212,12 +212,12 @@ func TestRotateFile(t *testing.T) {
g.WriteLine("Line 1")
g.WriteLine("Line 2")
g.WriteLine("Line 3")
g.FlushAndSync()
g.Flush()
g.RotateFile()
g.WriteLine("Line 4")
g.WriteLine("Line 5")
g.WriteLine("Line 6")
g.FlushAndSync()
g.Flush()

// Read g.Head.Path+"000"
body1, err := ioutil.ReadFile(g.Head.Path + ".000")
@@ -244,13 +244,13 @@ func TestFindLast1(t *testing.T) {
g.WriteLine("Line 2")
g.WriteLine("# a")
g.WriteLine("Line 3")
g.FlushAndSync()
g.Flush()
g.RotateFile()
g.WriteLine("Line 4")
g.WriteLine("Line 5")
g.WriteLine("Line 6")
g.WriteLine("# b")
g.FlushAndSync()
g.Flush()

match, found, err := g.FindLast("#")
assert.NoError(t, err)
@@ -267,14 +267,14 @@ func TestFindLast2(t *testing.T) {
g.WriteLine("Line 1")
g.WriteLine("Line 2")
g.WriteLine("Line 3")
g.FlushAndSync()
g.Flush()
g.RotateFile()
g.WriteLine("# a")
g.WriteLine("Line 4")
g.WriteLine("Line 5")
g.WriteLine("# b")
g.WriteLine("Line 6")
g.FlushAndSync()
g.Flush()

match, found, err := g.FindLast("#")
assert.NoError(t, err)
@@ -293,12 +293,12 @@ func TestFindLast3(t *testing.T) {
g.WriteLine("Line 2")
g.WriteLine("# b")
g.WriteLine("Line 3")
g.FlushAndSync()
g.Flush()
g.RotateFile()
g.WriteLine("Line 4")
g.WriteLine("Line 5")
g.WriteLine("Line 6")
g.FlushAndSync()
g.Flush()

match, found, err := g.FindLast("#")
assert.NoError(t, err)
@@ -315,12 +315,12 @@ func TestFindLast4(t *testing.T) {
g.WriteLine("Line 1")
g.WriteLine("Line 2")
g.WriteLine("Line 3")
g.FlushAndSync()
g.Flush()
g.RotateFile()
g.WriteLine("Line 4")
g.WriteLine("Line 5")
g.WriteLine("Line 6")
g.FlushAndSync()
g.Flush()

match, found, err := g.FindLast("#")
assert.NoError(t, err)
@@ -336,7 +336,7 @@ func TestWrite(t *testing.T) {

written := []byte("Medusa")
g.Write(written)
g.FlushAndSync()
g.Flush()

read := make([]byte, len(written))
gr, err := g.NewReader(0)
@@ -357,11 +357,11 @@ func TestGroupReaderRead(t *testing.T) {

professor := []byte("Professor Monster")
g.Write(professor)
g.FlushAndSync()
g.Flush()
g.RotateFile()
frankenstein := []byte("Frankenstein's Monster")
g.Write(frankenstein)
g.FlushAndSync()
g.Flush()

totalWrittenLength := len(professor) + len(frankenstein)
read := make([]byte, totalWrittenLength)
@@ -386,12 +386,12 @@ func TestGroupReaderRead2(t *testing.T) {

professor := []byte("Professor Monster")
g.Write(professor)
g.FlushAndSync()
g.Flush()
g.RotateFile()
frankenstein := []byte("Frankenstein's Monster")
frankensteinPart := []byte("Frankenstein")
g.Write(frankensteinPart) // note writing only a part
g.FlushAndSync()
g.Flush()

totalLength := len(professor) + len(frankenstein)
read := make([]byte, totalLength)
@@ -427,7 +427,7 @@ func TestMaxIndex(t *testing.T) {
assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning")

g.WriteLine("Line 1")
g.FlushAndSync()
g.Flush()
g.RotateFile()

assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file")
@@ -34,24 +34,21 @@ func GoPath() string {
return path
}

type logger interface {
Info(msg string, keyvals ...interface{})
}

// TrapSignal catches the SIGTERM/SIGINT and executes cb function. After that it exits
// with code 0.
func TrapSignal(logger logger, cb func()) {
// TrapSignal catches the SIGTERM and executes cb function. After that it exits
// with code 1.
func TrapSignal(cb func()) {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
for sig := range c {
logger.Info(fmt.Sprintf("captured %v, exiting...", sig))
fmt.Printf("captured %v, exiting...\n", sig)
if cb != nil {
cb()
}
os.Exit(0)
os.Exit(1)
}
}()
select {}
}

// Kill the running process by sending itself SIGTERM.
@@ -5,7 +5,7 @@ import (
"testing"
)

func TestOSGoPath(t *testing.T) {
func TestGoPath(t *testing.T) {
// restore original gopath upon exit
path := os.Getenv("GOPATH")
defer func() {
@@ -28,7 +28,7 @@ func TestOSGoPath(t *testing.T) {
}
}

func TestOSGoPathWithoutEnvVar(t *testing.T) {
func TestGoPathWithoutEnvVar(t *testing.T) {
// restore original gopath upon exit
path := os.Getenv("GOPATH")
defer func() {
@@ -179,6 +179,11 @@ func (mBatch *cLevelDBBatch) WriteSync() {
}
}

// Implements Batch.
func (mBatch *cLevelDBBatch) Close() {
mBatch.batch.Close()
}

//----------------------------------------
// Iterator
// NOTE This is almost identical to db/go_level_db.Iterator
@@ -250,3 +250,8 @@ func (dbch debugBatch) WriteSync() {
fmt.Printf("%v.batch.WriteSync()\n", dbch.label)
dbch.bch.WriteSync()
}

// Implements Batch.
func (dbch debugBatch) Close() {
dbch.bch.Close()
}
@@ -184,6 +184,10 @@ func (mBatch *goLevelDBBatch) WriteSync() {
}
}

// Implements Batch.
// Close is no-op for goLevelDBBatch.
func (mBatch *goLevelDBBatch) Close() {}

//----------------------------------------
// Iterator
// NOTE This is almost identical to db/c_level_db.Iterator
@@ -46,6 +46,10 @@ func (mBatch *memBatch) WriteSync() {
mBatch.write(true)
}

func (mBatch *memBatch) Close() {
mBatch.ops = nil
}

func (mBatch *memBatch) write(doSync bool) {
if mtx := mBatch.db.Mutex(); mtx != nil {
mtx.Lock()
@@ -248,6 +248,10 @@ func (pb prefixBatch) WriteSync() {
pb.source.WriteSync()
}

func (pb prefixBatch) Close() {
pb.source.Close()
}

//----------------------------------------
// prefixIterator
@@ -180,6 +180,7 @@ func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.N

func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) {
bat := s.db.NewBatch()
defer bat.Close()
for _, op := range b.Ops {
switch op.Type {
case protodb.Operation_SET:
@@ -260,3 +260,7 @@ func (bat *batch) WriteSync() {
panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err))
}
}

func (bat *batch) Close() {
bat.ops = nil
}
@@ -57,10 +57,12 @@ type DB interface {
//----------------------------------------
// Batch

// Batch Close must be called when the program no longer needs the object.
type Batch interface {
SetDeleter
Write()
WriteSync()
Close()
}

type SetDeleter interface {
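Given the new contract that `Close` must be called when a batch is no longer needed, call sites pair `NewBatch` with a deferred `Close`, as the remotedb server hunk above now does. A hedged usage sketch:

```
import (
	dbm "github.com/tendermint/tendermint/libs/db"
)

func writeBatch(db dbm.DB) {
	batch := db.NewBatch()
	defer batch.Close() // release batch resources even on early return

	batch.Set([]byte("k1"), []byte("v1"))
	batch.Set([]byte("k2"), []byte("v2"))
	batch.Delete([]byte("stale"))
	batch.Write() // or batch.WriteSync() for a durable write
}
```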
@@ -19,9 +19,10 @@ func TestExample(t *testing.T) {
defer s.Stop()

ctx := context.Background()
subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"))
ch := make(chan interface{}, 1)
err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Tombstone", map[string]string{"abci.account.name": "John"})
err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]string{"abci.account.name": "John"}))
require.NoError(t, err)
assertReceive(t, "Tombstone", subscription.Out())
assertReceive(t, "Tombstone", ch)
}
@@ -10,25 +10,41 @@
// match, this message will be pushed to all clients, subscribed to that query.
// See query subpackage for our implementation.
//
// Example:
// Due to the blocking send implementation, a single subscriber can freeze an
// entire server by not reading messages before it unsubscribes. To avoid such
// scenario, subscribers must either:
//
// q, err := query.New("account.name='John'")
// if err != nil {
// return err
// }
// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second)
// defer cancel()
// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q)
// if err != nil {
// return err
// }
// a) make sure they continue to read from the out channel until
// Unsubscribe(All) is called
//
// for {
// select {
// case msg <- subscription.Out():
// // handle msg.Data() and msg.Tags()
// case <-subscription.Cancelled():
// return subscription.Err()
// s.Subscribe(ctx, sub, qry, out)
// go func() {
// for msg := range out {
// // handle msg
// // will exit automatically when out is closed by Unsubscribe(All)
// }
// }()
// s.UnsubscribeAll(ctx, sub)
//
// b) drain the out channel before calling Unsubscribe(All)
//
// s.Subscribe(ctx, sub, qry, out)
// defer func() {
// // drain out to make sure we don't block
// LOOP:
// for {
// select {
// case <-out:
// default:
// break LOOP
// }
// }
// s.UnsubscribeAll(ctx, sub)
// }()
// for msg := range out {
// // handle msg
// if err != nil {
// return err
// }
// }
//
@@ -61,23 +77,19 @@ var (
ErrAlreadySubscribed = errors.New("already subscribed")
)

// Query defines an interface for a query to be used for subscribing.
type Query interface {
Matches(tags map[string]string) bool
String() string
type cmd struct {
op operation
query Query
ch chan<- interface{}
clientID string
msg interface{}
tags TagMap
}

type cmd struct {
op operation

// subscribe, unsubscribe
query Query
subscription *Subscription
clientID string

// publish
msg interface{}
tags map[string]string
// Query defines an interface for a query to be used for subscribing.
type Query interface {
Matches(tags TagMap) bool
String() string
}

// Server allows clients to subscribe/unsubscribe for messages, publishing
@@ -95,6 +107,37 @@ type Server struct {
// Option sets a parameter for the server.
type Option func(*Server)

// TagMap is used to associate tags to a message.
// They can be queried by subscribers to choose messages they will received.
type TagMap interface {
// Get returns the value for a key, or nil if no value is present.
// The ok result indicates whether value was found in the tags.
Get(key string) (value string, ok bool)
// Len returns the number of tags.
Len() int
}

type tagMap map[string]string

var _ TagMap = (*tagMap)(nil)

// NewTagMap constructs a new immutable tag set from a map.
func NewTagMap(data map[string]string) TagMap {
return tagMap(data)
}

// Get returns the value for a key, or nil if no value is present.
// The ok result indicates whether value was found in the tags.
func (ts tagMap) Get(key string) (value string, ok bool) {
value, ok = ts[key]
return
}

// Len returns the number of tags.
func (ts tagMap) Len() int {
return len(ts)
}

// NewServer returns a new server. See the commentary on the Option functions
// for a detailed description of how to configure buffering. If no options are
// provided, the resulting server's queue is unbuffered.
@@ -131,34 +174,11 @@ func (s *Server) BufferCapacity() int {
return s.cmdsCap
}

// Subscribe creates a subscription for the given client.
//
// An error will be returned to the caller if the context is canceled or if
// subscription already exist for pair clientID and query.
//
// outCapacity can be used to set a capacity for Subscription#Out channel (1 by
// default). Panics if outCapacity is less than or equal to zero. If you want
// an unbuffered channel, use SubscribeUnbuffered.
func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, outCapacity ...int) (*Subscription, error) {
outCap := 1
if len(outCapacity) > 0 {
if outCapacity[0] <= 0 {
panic("Negative or zero capacity. Use SubscribeUnbuffered if you want an unbuffered channel")
}
outCap = outCapacity[0]
}

return s.subscribe(ctx, clientID, query, outCap)
}

// SubscribeUnbuffered does the same as Subscribe, except it returns a
// subscription with unbuffered channel. Use with caution as it can freeze the
// server.
func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (*Subscription, error) {
return s.subscribe(ctx, clientID, query, 0)
}

func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) {
// Subscribe creates a subscription for the given client. It accepts a channel
// on which messages matching the given query can be received. An error will be
// returned to the caller if the context is canceled or if subscription already
// exist for pair clientID and query.
func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error {
s.mtx.RLock()
clientSubscriptions, ok := s.subscriptions[clientID]
if ok {
@@ -166,23 +186,22 @@ func (s *Server) subscribe(ctx context.Context, clientID string, query Query, ou
}
s.mtx.RUnlock()
if ok {
return nil, ErrAlreadySubscribed
return ErrAlreadySubscribed
}

subscription := NewSubscription(outCapacity)
select {
case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}:
case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}:
s.mtx.Lock()
if _, ok = s.subscriptions[clientID]; !ok {
s.subscriptions[clientID] = make(map[string]struct{})
}
s.subscriptions[clientID][query.String()] = struct{}{}
s.mtx.Unlock()
return subscription, nil
return nil
case <-ctx.Done():
return nil, ctx.Err()
return ctx.Err()
case <-s.Quit():
return nil, nil
return nil
}
}

@@ -242,13 +261,13 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
// Publish publishes the given message. An error will be returned to the caller
// if the context is canceled.
func (s *Server) Publish(ctx context.Context, msg interface{}) error {
return s.PublishWithTags(ctx, msg, make(map[string]string))
return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]string)))
}

// PublishWithTags publishes the given message with the set of tags. The set is
// matched with clients queries. If there is a match, the message is sent to
// the client.
func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags map[string]string) error {
func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error {
select {
case s.cmds <- cmd{op: pub, msg: msg, tags: tags}:
return nil
@@ -266,8 +285,10 @@ func (s *Server) OnStop() {

// NOTE: not goroutine safe
type state struct {
// query string -> client -> subscription
subscriptions map[string]map[string]*Subscription
// query string -> client -> ch
queryToChanMap map[string]map[string]chan<- interface{}
// client -> query string -> struct{}
clientToQueryMap map[string]map[string]struct{}
// query string -> queryPlusRefCount
queries map[string]*queryPlusRefCount
}
@@ -282,8 +303,9 @@ type queryPlusRefCount struct {
// OnStart implements Service.OnStart by starting the server.
func (s *Server) OnStart() error {
go s.loop(state{
subscriptions: make(map[string]map[string]*Subscription),
queries: make(map[string]*queryPlusRefCount),
queryToChanMap: make(map[string]map[string]chan<- interface{}),
clientToQueryMap: make(map[string]map[string]struct{}),
queries: make(map[string]*queryPlusRefCount),
})
return nil
}
@@ -299,57 +321,75 @@ loop:
switch cmd.op {
case unsub:
if cmd.query != nil {
state.remove(cmd.clientID, cmd.query.String(), ErrUnsubscribed)
state.remove(cmd.clientID, cmd.query)
} else {
state.removeClient(cmd.clientID, ErrUnsubscribed)
state.removeAll(cmd.clientID)
}
case shutdown:
state.removeAll(nil)
for clientID := range state.clientToQueryMap {
state.removeAll(clientID)
}
break loop
case sub:
state.add(cmd.clientID, cmd.query, cmd.subscription)
state.add(cmd.clientID, cmd.query, cmd.ch)
case pub:
state.send(cmd.msg, cmd.tags)
}
}
}

func (state *state) add(clientID string, q Query, subscription *Subscription) {
func (state *state) add(clientID string, q Query, ch chan<- interface{}) {
qStr := q.String()

// initialize subscription for this client per query if needed
if _, ok := state.subscriptions[qStr]; !ok {
state.subscriptions[qStr] = make(map[string]*Subscription)
// initialize clientToChannelMap per query if needed
if _, ok := state.queryToChanMap[qStr]; !ok {
state.queryToChanMap[qStr] = make(map[string]chan<- interface{})
}
// create subscription
state.subscriptions[qStr][clientID] = subscription

// initialize query if needed
// create subscription
state.queryToChanMap[qStr][clientID] = ch

// initialize queries if needed
if _, ok := state.queries[qStr]; !ok {
state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0}
}
// increment reference counter
state.queries[qStr].refCount++

// add client if needed
if _, ok := state.clientToQueryMap[clientID]; !ok {
state.clientToQueryMap[clientID] = make(map[string]struct{})
}
state.clientToQueryMap[clientID][qStr] = struct{}{}
}

func (state *state) remove(clientID string, qStr string, reason error) {
clientSubscriptions, ok := state.subscriptions[qStr]
func (state *state) remove(clientID string, q Query) {
qStr := q.String()

clientToChannelMap, ok := state.queryToChanMap[qStr]
if !ok {
return
}

subscription, ok := clientSubscriptions[clientID]
ch, ok := clientToChannelMap[clientID]
if !ok {
return
}

subscription.cancel(reason)
close(ch)

// remove client from query map.
// remove the query from client map.
// if client is not subscribed to anything else, remove it.
delete(state.clientToQueryMap[clientID], qStr)
if len(state.clientToQueryMap[clientID]) == 0 {
delete(state.clientToQueryMap, clientID)
}

// remove the client from query map.
// if query has no other clients subscribed, remove it.
delete(state.subscriptions[qStr], clientID)
if len(state.subscriptions[qStr]) == 0 {
delete(state.subscriptions, qStr)
delete(state.queryToChanMap[qStr], clientID)
if len(state.queryToChanMap[qStr]) == 0 {
delete(state.queryToChanMap, qStr)
}

// decrease ref counter in queries
@@ -360,38 +400,41 @@ func (state *state) remove(clientID string, qStr string, reason error) {
}
}

func (state *state) removeClient(clientID string, reason error) {
for qStr, clientSubscriptions := range state.subscriptions {
if _, ok := clientSubscriptions[clientID]; ok {
state.remove(clientID, qStr, reason)
func (state *state) removeAll(clientID string) {
queryMap, ok := state.clientToQueryMap[clientID]
if !ok {
return
}

for qStr := range queryMap {
ch := state.queryToChanMap[qStr][clientID]
close(ch)

// remove the client from query map.
// if query has no other clients subscribed, remove it.
|
||||
delete(state.queryToChanMap[qStr], clientID)
|
||||
if len(state.queryToChanMap[qStr]) == 0 {
|
||||
delete(state.queryToChanMap, qStr)
|
||||
}
|
||||
|
||||
// decrease ref counter in queries
|
||||
state.queries[qStr].refCount--
|
||||
// remove the query if nobody else is using it
|
||||
if state.queries[qStr].refCount == 0 {
|
||||
delete(state.queries, qStr)
|
||||
}
|
||||
}
|
||||
|
||||
// remove the client.
|
||||
delete(state.clientToQueryMap, clientID)
|
||||
}
|
||||
|
||||
func (state *state) removeAll(reason error) {
|
||||
for qStr, clientSubscriptions := range state.subscriptions {
|
||||
for clientID := range clientSubscriptions {
|
||||
state.remove(clientID, qStr, reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (state *state) send(msg interface{}, tags map[string]string) {
|
||||
for qStr, clientSubscriptions := range state.subscriptions {
|
||||
func (state *state) send(msg interface{}, tags TagMap) {
|
||||
for qStr, clientToChannelMap := range state.queryToChanMap {
|
||||
q := state.queries[qStr].q
|
||||
if q.Matches(tags) {
|
||||
for clientID, subscription := range clientSubscriptions {
|
||||
if cap(subscription.out) == 0 {
|
||||
// block on unbuffered channel
|
||||
subscription.out <- Message{msg, tags}
|
||||
} else {
|
||||
// don't block on buffered channels
|
||||
select {
|
||||
case subscription.out <- Message{msg, tags}:
|
||||
default:
|
||||
state.remove(clientID, qStr, ErrOutOfCapacity)
|
||||
}
|
||||
}
|
||||
for _, ch := range clientToChannelMap {
|
||||
ch <- msg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
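For orientation, a minimal sketch of the channel-based API these changes restore; the server setup mirrors the tests that follow, and the client ID and payload are illustrative, not taken from the diff.

package main

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/libs/pubsub"
	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	s := pubsub.NewServer()
	s.Start()
	defer s.Stop()

	ctx := context.Background()
	// Buffered channel: Publish will not block on this subscriber.
	ch := make(chan interface{}, 1)
	if err := s.Subscribe(ctx, "example-client", query.Empty{}, ch); err != nil {
		panic(err)
	}

	if err := s.Publish(ctx, "hello"); err != nil {
		panic(err)
	}
	fmt.Println(<-ch) // prints "hello"
}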
@@ -27,97 +27,16 @@ func TestSubscribe(t *testing.T) {
defer s.Stop()

ctx := context.Background()
subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
ch := make(chan interface{}, 1)
err := s.Subscribe(ctx, clientID, query.Empty{}, ch)
require.NoError(t, err)
err = s.Publish(ctx, "Ka-Zar")
require.NoError(t, err)
assertReceive(t, "Ka-Zar", subscription.Out())
assertReceive(t, "Ka-Zar", ch)

published := make(chan struct{})
go func() {
defer close(published)

err := s.Publish(ctx, "Quicksilver")
assert.NoError(t, err)

err = s.Publish(ctx, "Asylum")
assert.NoError(t, err)
}()

select {
case <-published:
assertReceive(t, "Quicksilver", subscription.Out())
assertCancelled(t, subscription, pubsub.ErrOutOfCapacity)
case <-time.After(100 * time.Millisecond):
t.Fatal("Expected Publish(Asylum) not to block")
}
}

func TestSubscribeWithCapacity(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()

ctx := context.Background()
assert.Panics(t, func() {
s.Subscribe(ctx, clientID, query.Empty{}, -1)
})
assert.Panics(t, func() {
s.Subscribe(ctx, clientID, query.Empty{}, 0)
})
subscription, err := s.Subscribe(ctx, clientID, query.Empty{}, 1)
err = s.Publish(ctx, "Quicksilver")
require.NoError(t, err)
err = s.Publish(ctx, "Aggamon")
require.NoError(t, err)
assertReceive(t, "Aggamon", subscription.Out())
}

func TestSubscribeUnbuffered(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()

ctx := context.Background()
subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.Empty{})
require.NoError(t, err)

published := make(chan struct{})
go func() {
defer close(published)

err := s.Publish(ctx, "Ultron")
assert.NoError(t, err)

err = s.Publish(ctx, "Darkhawk")
assert.NoError(t, err)
}()

select {
case <-published:
t.Fatal("Expected Publish(Darkhawk) to block")
case <-time.After(100 * time.Millisecond):
assertReceive(t, "Ultron", subscription.Out())
assertReceive(t, "Darkhawk", subscription.Out())
}
}

func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) {
s := pubsub.NewServer()
s.SetLogger(log.TestingLogger())
s.Start()
defer s.Stop()

ctx := context.Background()
subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
require.NoError(t, err)
err = s.Publish(ctx, "Fat Cobra")
require.NoError(t, err)
err = s.Publish(ctx, "Viper")
require.NoError(t, err)

assertCancelled(t, subscription, pubsub.ErrOutOfCapacity)
assertReceive(t, "Quicksilver", ch)
}

func TestDifferentClients(t *testing.T) {
@@ -127,24 +46,27 @@ func TestDifferentClients(t *testing.T) {
defer s.Stop()

ctx := context.Background()
subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"))
ch1 := make(chan interface{}, 1)
err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Iceman", map[string]string{"tm.events.type": "NewBlock"})
err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
require.NoError(t, err)
assertReceive(t, "Iceman", subscription1.Out())
assertReceive(t, "Iceman", ch1)

subscription2, err := s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"))
ch2 := make(chan interface{}, 1)
err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Ultimo", map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"})
err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}))
require.NoError(t, err)
assertReceive(t, "Ultimo", subscription1.Out())
assertReceive(t, "Ultimo", subscription2.Out())
assertReceive(t, "Ultimo", ch1)
assertReceive(t, "Ultimo", ch2)

subscription3, err := s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"))
ch3 := make(chan interface{}, 1)
err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Valeria Richards", map[string]string{"tm.events.type": "NewRoundStep"})
err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewRoundStep"}))
require.NoError(t, err)
assert.Zero(t, len(subscription3.Out()))
assert.Zero(t, len(ch3))
}

func TestClientSubscribesTwice(t *testing.T) {
@@ -156,19 +78,20 @@ func TestClientSubscribesTwice(t *testing.T) {
ctx := context.Background()
q := query.MustParse("tm.events.type='NewBlock'")

subscription1, err := s.Subscribe(ctx, clientID, q)
ch1 := make(chan interface{}, 1)
err := s.Subscribe(ctx, clientID, q, ch1)
require.NoError(t, err)
err = s.PublishWithTags(ctx, "Goblin Queen", map[string]string{"tm.events.type": "NewBlock"})
err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
require.NoError(t, err)
assertReceive(t, "Goblin Queen", subscription1.Out())
assertReceive(t, "Goblin Queen", ch1)

subscription2, err := s.Subscribe(ctx, clientID, q)
ch2 := make(chan interface{}, 1)
err = s.Subscribe(ctx, clientID, q, ch2)
require.Error(t, err)
require.Nil(t, subscription2)

err = s.PublishWithTags(ctx, "Spider-Man", map[string]string{"tm.events.type": "NewBlock"})
err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"}))
require.NoError(t, err)
assertReceive(t, "Spider-Man", subscription1.Out())
assertReceive(t, "Spider-Man", ch1)
}

func TestUnsubscribe(t *testing.T) {
@@ -178,16 +101,18 @@ func TestUnsubscribe(t *testing.T) {
defer s.Stop()

ctx := context.Background()
subscription, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
ch := make(chan interface{})
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch)
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
require.NoError(t, err)

err = s.Publish(ctx, "Nick Fury")
require.NoError(t, err)
assert.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe")
assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe")

assertCancelled(t, subscription, pubsub.ErrUnsubscribed)
_, ok := <-ch
assert.False(t, ok)
}

func TestClientUnsubscribesTwice(t *testing.T) {
@@ -197,7 +122,8 @@ func TestClientUnsubscribesTwice(t *testing.T) {
defer s.Stop()

ctx := context.Background()
_, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
ch := make(chan interface{})
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch)
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
require.NoError(t, err)
@@ -215,16 +141,18 @@ func TestResubscribe(t *testing.T) {
defer s.Stop()

ctx := context.Background()
subscription, err := s.Subscribe(ctx, clientID, query.Empty{})
ch := make(chan interface{})
err := s.Subscribe(ctx, clientID, query.Empty{}, ch)
require.NoError(t, err)
err = s.Unsubscribe(ctx, clientID, query.Empty{})
require.NoError(t, err)
subscription, err = s.Subscribe(ctx, clientID, query.Empty{})
ch = make(chan interface{})
err = s.Subscribe(ctx, clientID, query.Empty{}, ch)
require.NoError(t, err)

err = s.Publish(ctx, "Cable")
require.NoError(t, err)
assertReceive(t, "Cable", subscription.Out())
assertReceive(t, "Cable", ch)
}

func TestUnsubscribeAll(t *testing.T) {
@@ -234,9 +162,10 @@ func TestUnsubscribeAll(t *testing.T) {
defer s.Stop()

ctx := context.Background()
subscription1, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"))
ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1)
err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1)
require.NoError(t, err)
subscription2, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"))
err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2)
require.NoError(t, err)

err = s.UnsubscribeAll(ctx, clientID)
@@ -244,11 +173,13 @@ func TestUnsubscribeAll(t *testing.T) {

err = s.Publish(ctx, "Nick Fury")
require.NoError(t, err)
assert.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll")
assert.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll")
assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll")
assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll")

assertCancelled(t, subscription1, pubsub.ErrUnsubscribed)
assertCancelled(t, subscription2, pubsub.ErrUnsubscribed)
_, ok := <-ch1
assert.False(t, ok)
_, ok = <-ch2
assert.False(t, ok)
}

func TestBufferCapacity(t *testing.T) {
@@ -286,26 +217,18 @@ func benchmarkNClients(n int, b *testing.B) {

ctx := context.Background()
for i := 0; i < n; i++ {
subscription, err := s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)))
if err != nil {
b.Fatal(err)
}
ch := make(chan interface{})
go func() {
for {
select {
case <-subscription.Out():
continue
case <-subscription.Cancelled():
return
}
for range ch {
}
}()
s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch)
}

b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.PublishWithTags(ctx, "Gamora", map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)})
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)}))
}
}

@@ -317,26 +240,18 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
ctx := context.Background()
q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1")
for i := 0; i < n; i++ {
subscription, err := s.Subscribe(ctx, clientID, q)
if err != nil {
b.Fatal(err)
}
ch := make(chan interface{})
go func() {
for {
select {
case <-subscription.Out():
continue
case <-subscription.Cancelled():
return
}
for range ch {
}
}()
s.Subscribe(ctx, clientID, q, ch)
}

b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.PublishWithTags(ctx, "Gamora", map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"})
s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"}))
}
}

@@ -344,18 +259,14 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) {
/// HELPERS
///////////////////////////////////////////////////////////////////////////////

func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) {
func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) {
select {
case actual := <-ch:
assert.Equal(t, expected, actual.Data(), msgAndArgs...)
if actual != nil {
assert.Equal(t, expected, actual, msgAndArgs...)
}
case <-time.After(1 * time.Second):
t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected)
debug.PrintStack()
}
}

func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) {
_, ok := <-subscription.Cancelled()
assert.False(t, ok)
assert.Equal(t, err, subscription.Err())
}
@@ -1,11 +1,13 @@
package query

import "github.com/tendermint/tendermint/libs/pubsub"

// Empty query matches any set of tags.
type Empty struct {
}

// Matches always returns true.
func (Empty) Matches(tags map[string]string) bool {
func (Empty) Matches(tags pubsub.TagMap) bool {
return true
}
@@ -5,13 +5,14 @@ import (

"github.com/stretchr/testify/assert"

"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"
)

func TestEmptyQueryMatchesAnything(t *testing.T) {
q := query.Empty{}
assert.True(t, q.Matches(map[string]string{}))
assert.True(t, q.Matches(map[string]string{"Asher": "Roth"}))
assert.True(t, q.Matches(map[string]string{"Route": "66"}))
assert.True(t, q.Matches(map[string]string{"Route": "66", "Billy": "Blue"}))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{})))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Asher": "Roth"})))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66"})))
assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66", "Billy": "Blue"})))
}
@@ -14,6 +14,8 @@ import (
"strconv"
"strings"
"time"

"github.com/tendermint/tendermint/libs/pubsub"
)

// Query holds the query string and the query parser.
@@ -152,8 +154,8 @@ func (q *Query) Conditions() []Condition {
//
// For example, query "name=John" matches tags = {"name": "John"}. More
// examples can be found in parser_test.go and query_test.go.
func (q *Query) Matches(tags map[string]string) bool {
if len(tags) == 0 {
func (q *Query) Matches(tags pubsub.TagMap) bool {
if tags.Len() == 0 {
return false
}

@@ -238,9 +240,9 @@ func (q *Query) Matches(tags map[string]string) bool {
// value from it to the operand using the operator.
//
// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" }
func match(tag string, op Operator, operand reflect.Value, tags map[string]string) bool {
func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool {
// look up the tag from the query in tags
value, ok := tags[tag]
value, ok := tags.Get(tag)
if !ok {
return false
}
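The hunks above thread a pubsub.TagMap through Matches but never show its definition. A plausible minimal shape, consistent with the Len, Get, and NewTagMap calls used here; the real definition lives in libs/pubsub and may differ in detail.

package pubsub

// TagMap is a read-only view over event tags (assumed interface).
type TagMap interface {
	// Get returns the value for the given key and whether it exists.
	Get(key string) (string, bool)
	// Len returns the number of tags.
	Len() int
}

type tagMap map[string]string

// NewTagMap wraps a plain map in the TagMap interface.
func NewTagMap(m map[string]string) TagMap { return tagMap(m) }

func (m tagMap) Get(key string) (string, bool) { v, ok := m[key]; return v, ok }
func (m tagMap) Len() int                      { return len(m) }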
@@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/pubsub/query"
)

@@ -52,9 +53,9 @@ func TestMatches(t *testing.T) {
}

if tc.matches {
assert.True(t, q.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags)
assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags)
} else {
assert.False(t, q.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags)
assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags)
}
}
}
@@ -1,89 +0,0 @@
package pubsub

import (
"errors"
"sync"
)

var (
// ErrUnsubscribed is returned by Err when a client unsubscribes.
ErrUnsubscribed = errors.New("client unsubscribed")

// ErrOutOfCapacity is returned by Err when a client is not pulling messages
// fast enough. Note the client's subscription will be terminated.
ErrOutOfCapacity = errors.New("client is not pulling messages fast enough")
)

// A Subscription represents a client subscription for a particular query and
// consists of three things:
// 1) channel onto which messages and tags are published
// 2) channel which is closed if a client is too slow or chooses to unsubscribe
// 3) err indicating the reason for (2)
type Subscription struct {
out chan Message

cancelled chan struct{}
mtx sync.RWMutex
err error
}

// NewSubscription returns a new subscription with the given outCapacity.
func NewSubscription(outCapacity int) *Subscription {
return &Subscription{
out: make(chan Message, outCapacity),
cancelled: make(chan struct{}),
}
}

// Out returns a channel onto which messages and tags are published.
// Unsubscribe/UnsubscribeAll does not close the channel to prevent clients
// from receiving a nil message.
func (s *Subscription) Out() <-chan Message {
return s.out
}

// Cancelled returns a channel that's closed when the subscription is
// terminated and supposed to be used in a select statement.
func (s *Subscription) Cancelled() <-chan struct{} {
return s.cancelled
}

// Err returns nil if the channel returned by Cancelled is not yet closed.
// If the channel is closed, Err returns a non-nil error explaining why:
// - ErrUnsubscribed if the subscriber chose to unsubscribe,
// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough
// and the channel returned by Out became full.
// After Err returns a non-nil error, successive calls to Err return the same
// error.
func (s *Subscription) Err() error {
s.mtx.RLock()
defer s.mtx.RUnlock()
return s.err
}

func (s *Subscription) cancel(err error) {
s.mtx.Lock()
s.err = err
s.mtx.Unlock()
close(s.cancelled)
}

// Message glues data and tags together.
type Message struct {
data interface{}
tags map[string]string
}

func NewMessage(data interface{}, tags map[string]string) Message {
return Message{data, tags}
}

// Data returns the original data published.
func (msg Message) Data() interface{} {
return msg.data
}

// Tags returns tags, which matched the client's query.
func (msg Message) Tags() map[string]string {
return msg.tags
}
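For contrast, consumer code written against the Subscription type deleted above looked roughly like this; it compiles only against the pre-revert tree, and the function name is illustrative.

package pubsubdemo

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/pubsub"
)

// consume drains a Subscription until it is cancelled (sketch based
// only on the deleted file above; the channel-based replacement
// passes its own chan interface{} instead).
func consume(sub *pubsub.Subscription) {
	for {
		select {
		case msg := <-sub.Out():
			fmt.Println(msg.Data(), msg.Tags())
		case <-sub.Cancelled():
			// Err reports why: ErrUnsubscribed or ErrOutOfCapacity.
			fmt.Println("cancelled:", sub.Err())
			return
		}
	}
}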
@@ -54,6 +54,7 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error {

dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc)
batch := dbp.db.NewBatch()
defer batch.Close()

// Save the fc.validators.
// We might be overwriting what we already have, but
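The added defer matters because an early return from SaveFullCommit would otherwise leak the batch. A hedged sketch of the pattern; the Batch interface here is assumed, not the project's exact one.

package litedemo

// Batch is an assumed stand-in for the database batch interface;
// only the methods used below are declared.
type Batch interface {
	Set(key, value []byte)
	Write()
	Close()
}

// save defers Close immediately after creating the batch, so it runs
// on every return path and the batch never leaks.
func save(newBatch func() Batch, key, value []byte) {
	batch := newBatch()
	defer batch.Close()

	batch.Set(key, value)
	batch.Write()
}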
@@ -63,26 +63,13 @@ var (
// ErrTxInCache is returned to the client if we saw tx earlier
ErrTxInCache = errors.New("Tx already exists in cache")

// ErrMempoolIsFull means Tendermint & an application can't handle that much load
ErrMempoolIsFull = errors.New("Mempool is full")

// ErrTxTooLarge means the tx is too big to be sent in a message to other peers
ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize)
)

// ErrMempoolIsFull means Tendermint & an application can't handle that much load
type ErrMempoolIsFull struct {
numTxs int
maxTxs int

txsBytes int64
maxTxsBytes int64
}

func (e ErrMempoolIsFull) Error() string {
return fmt.Sprintf(
"Mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
e.numTxs, e.maxTxs,
e.txsBytes, e.maxTxsBytes)
}

// ErrPreCheck is returned when tx is too big
type ErrPreCheck struct {
Reason error
@@ -160,9 +147,6 @@ type Mempool struct {
preCheck PreCheckFunc
postCheck PostCheckFunc

// Atomic integers
txsBytes int64 // see TxsBytes

// Keep a cache of already-seen txs.
// This reduces the pressure on the proxyApp.
cache txCache
@@ -281,13 +265,8 @@ func (mem *Mempool) Size() int {
return mem.txs.Len()
}

// TxsBytes returns the total size of all txs in the mempool.
func (mem *Mempool) TxsBytes() int64 {
return atomic.LoadInt64(&mem.txsBytes)
}

// FlushAppConn flushes the mempool connection to ensure async resCb calls are
// done e.g. from CheckTx.
// Flushes the mempool connection to ensure async resCb calls are done e.g.
// from CheckTx.
func (mem *Mempool) FlushAppConn() error {
return mem.proxyAppConn.FlushSync()
}
@@ -303,8 +282,6 @@ func (mem *Mempool) Flush() {
mem.txs.Remove(e)
e.DetachPrev()
}

_ = atomic.SwapInt64(&mem.txsBytes, 0)
}

// TxsFront returns the first transaction in the ordered list for peer
@@ -331,15 +308,8 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
// use defer to unlock mutex because application (*local client*) might panic
defer mem.proxyMtx.Unlock()

var (
memSize = mem.Size()
txsBytes = mem.TxsBytes()
)
if memSize >= mem.config.Size ||
int64(len(tx))+txsBytes > mem.config.MaxTxsBytes {
return ErrMempoolIsFull{
memSize, mem.config.Size,
txsBytes, mem.config.MaxTxsBytes}
if mem.Size() >= mem.config.Size {
return ErrMempoolIsFull
}

// The size of the corresponding amino-encoded TxMessage
@@ -413,7 +383,6 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
tx: tx,
}
mem.txs.PushBack(memTx)
atomic.AddInt64(&mem.txsBytes, int64(len(tx)))
mem.logger.Info("Added good transaction",
"tx", TxID(tx),
"res", r,
@@ -455,7 +424,6 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
// Tx became invalidated due to newly committed block.
mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr)
mem.txs.Remove(mem.recheckCursor)
atomic.AddInt64(&mem.txsBytes, int64(-len(tx)))
mem.recheckCursor.DetachPrev()

// remove from cache (it might be good later)
@@ -629,7 +597,6 @@ func (mem *Mempool) removeTxs(txs types.Txs) []types.Tx {
if _, ok := txsMap[string(memTx.tx)]; ok {
// remove from clist
mem.txs.Remove(e)
atomic.AddInt64(&mem.txsBytes, int64(-len(memTx.tx)))
e.DetachPrev()

// NOTE: we don't remove committed txs from the cache.
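Because the revert swaps the typed struct error back to a sentinel, callers detect a full mempool differently. A sketch of both checks; the package and helper names are illustrative.

package mempooldemo

import "errors"

// Sentinel form restored by this diff.
var ErrMempoolIsFull = errors.New("Mempool is full")

// isFull shows how a caller detects the condition under each shape.
func isFull(err error) bool {
	// Sentinel (after this diff): a direct comparison suffices.
	if err == ErrMempoolIsFull {
		return true
	}
	// The struct form (before this diff) needed a type assertion instead:
	//   _, ok := err.(ErrMempoolIsFull)
	return false
}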
@@ -30,10 +30,8 @@ import (
type cleanupFunc func()

func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, cleanupFunc) {
return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test"))
}
config := cfg.ResetTestRoot("mempool_test")

func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*Mempool, cleanupFunc) {
appConnMem, _ := cc.NewABCIClient()
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
err := appConnMem.Start()
@@ -464,72 +462,6 @@ func TestMempoolMaxMsgSize(t *testing.T) {

}

func TestMempoolTxsBytes(t *testing.T) {
app := kvstore.NewKVStoreApplication()
cc := proxy.NewLocalClientCreator(app)
config := cfg.ResetTestRoot("mempool_test")
config.Mempool.MaxTxsBytes = 10
mempool, cleanup := newMempoolWithAppAndConfig(cc, config)
defer cleanup()

// 1. zero by default
assert.EqualValues(t, 0, mempool.TxsBytes())

// 2. len(tx) after CheckTx
err := mempool.CheckTx([]byte{0x01}, nil)
require.NoError(t, err)
assert.EqualValues(t, 1, mempool.TxsBytes())

// 3. zero again after tx is removed by Update
mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil)
assert.EqualValues(t, 0, mempool.TxsBytes())

// 4. zero after Flush
err = mempool.CheckTx([]byte{0x02, 0x03}, nil)
require.NoError(t, err)
assert.EqualValues(t, 2, mempool.TxsBytes())

mempool.Flush()
assert.EqualValues(t, 0, mempool.TxsBytes())

// 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached.
err = mempool.CheckTx([]byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, nil)
require.NoError(t, err)
err = mempool.CheckTx([]byte{0x05}, nil)
if assert.Error(t, err) {
assert.IsType(t, ErrMempoolIsFull{}, err)
}

// 6. zero after tx is rechecked and removed due to not being valid anymore
app2 := counter.NewCounterApplication(true)
cc = proxy.NewLocalClientCreator(app2)
mempool, cleanup = newMempoolWithApp(cc)
defer cleanup()

txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))

err = mempool.CheckTx(txBytes, nil)
require.NoError(t, err)
assert.EqualValues(t, 8, mempool.TxsBytes())

appConnCon, _ := cc.NewABCIClient()
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
err = appConnCon.Start()
require.Nil(t, err)
defer appConnCon.Stop()
res, err := appConnCon.DeliverTxSync(txBytes)
require.NoError(t, err)
require.EqualValues(t, 0, res.Code)
res2, err := appConnCon.CommitSync()
require.NoError(t, err)
require.NotEmpty(t, res2.Data)

// Pretend like we committed nothing so txBytes gets rechecked and removed.
mempool.Update(1, []types.Tx{}, nil, nil)
assert.EqualValues(t, 0, mempool.TxsBytes())
}

func checksumIt(data []byte) string {
h := sha256.New()
h.Write(data)
@@ -914,7 +914,7 @@ func createAndStartPrivValidatorSocketClient(
)
}

pvsc := privval.NewSignerValidatorEndpoint(logger.With("module", "privval"), listener)
pvsc := privval.NewSocketVal(logger.With("module", "privval"), listener)
if err := pvsc.Start(); err != nil {
return nil, errors.Wrap(err, "failed to start private validator")
}
@@ -42,12 +42,11 @@ func TestNodeStartStop(t *testing.T) {
t.Logf("Started node %v", n.sw.NodeInfo())

// wait for the node to produce a block
blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock)
blockCh := make(chan interface{})
err = n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock, blockCh)
require.NoError(t, err)
select {
case <-blocksSub.Out():
case <-blocksSub.Cancelled():
t.Fatal("blocksSub was cancelled")
case <-blockCh:
case <-time.After(10 * time.Second):
t.Fatal("timed out waiting for the node to produce a block")
}
@@ -132,13 +131,13 @@ func TestNodeSetPrivValTCP(t *testing.T) {
config.BaseConfig.PrivValidatorListenAddr = addr

dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
pvsc := privval.NewSignerServiceEndpoint(
pvsc := privval.NewRemoteSigner(
log.TestingLogger(),
config.ChainID(),
types.NewMockPV(),
dialer,
)
privval.SignerServiceEndpointTimeoutReadWrite(100 * time.Millisecond)(pvsc)
privval.RemoteSignerConnDeadline(100 * time.Millisecond)(pvsc)

go func() {
err := pvsc.Start()
@@ -150,7 +149,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {

n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
assert.IsType(t, &privval.SignerValidatorEndpoint{}, n.PrivValidator())
assert.IsType(t, &privval.SocketVal{}, n.PrivValidator())
}

// address without a protocol must result in error
@@ -174,13 +173,13 @@ func TestNodeSetPrivValIPC(t *testing.T) {
config.BaseConfig.PrivValidatorListenAddr = "unix://" + tmpfile

dialer := privval.DialUnixFn(tmpfile)
pvsc := privval.NewSignerServiceEndpoint(
pvsc := privval.NewRemoteSigner(
log.TestingLogger(),
config.ChainID(),
types.NewMockPV(),
dialer,
)
privval.SignerServiceEndpointTimeoutReadWrite(100 * time.Millisecond)(pvsc)
privval.RemoteSignerConnDeadline(100 * time.Millisecond)(pvsc)

go func() {
err := pvsc.Start()
@@ -190,7 +189,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {

n, err := DefaultNewNode(config, log.TestingLogger())
require.NoError(t, err)
assert.IsType(t, &privval.SignerValidatorEndpoint{}, n.PrivValidator())
assert.IsType(t, &privval.SocketVal{}, n.PrivValidator())

}
@@ -29,10 +29,7 @@ const aeadSizeOverhead = 16 // overhead of poly 1305 authentication tag
const aeadKeySize = chacha20poly1305.KeySize
const aeadNonceSize = chacha20poly1305.NonceSize

var (
ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer")
ErrSharedSecretIsZero = errors.New("shared secret is all zeroes")
)
var ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer")

// SecretConnection implements net.Conn.
// It is an implementation of the STS protocol.
@@ -93,10 +90,7 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*
locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])

// Compute common diffie hellman secret using X25519.
dhSecret, err := computeDHSecret(remEphPub, locEphPriv)
if err != nil {
return nil, err
}
dhSecret := computeDHSecret(remEphPub, locEphPriv)

// generate the secret used for receiving, sending, challenge via hkdf-sha2 on dhSecret
recvSecret, sendSecret, challenge := deriveSecretAndChallenge(dhSecret, locIsLeast)
@@ -236,12 +230,9 @@ func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {

func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
// TODO: Probably not a problem but ask Tony: different from the rust implementation (uses x25519-dalek),
// we do not "clamp" the private key scalar:
// see: https://github.com/dalek-cryptography/x25519-dalek/blob/34676d336049df2bba763cc076a75e47ae1f170f/src/x25519.rs#L56-L74
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
panic("Could not generate ephemeral key-pair")
panic("Could not generate ephemeral keypairs")
}
return
}
@@ -358,20 +349,9 @@ func deriveSecretAndChallenge(dhSecret *[32]byte, locIsLeast bool) (recvSecret,
return
}

// computeDHSecret computes a Diffie-Hellman shared secret key
// from our own local private key and the other's public key.
//
// It returns an error if the computed shared secret is all zeroes.
func computeDHSecret(remPubKey, locPrivKey *[32]byte) (shrKey *[32]byte, err error) {
func computeDHSecret(remPubKey, locPrivKey *[32]byte) (shrKey *[32]byte) {
shrKey = new([32]byte)
curve25519.ScalarMult(shrKey, locPrivKey, remPubKey)

// reject if the returned shared secret is all zeroes
// related to: https://github.com/tendermint/tendermint/issues/3010
zero := new([32]byte)
if subtle.ConstantTimeCompare(shrKey[:], zero[:]) == 1 {
return nil, ErrSharedSecretIsZero
}
return
}
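For reference, the checked variant removed by the hunk above can be reconstructed from the deleted lines: it rejects the all-zero shared secret that low-order points produce (see tendermint/tendermint#3010). The function name here is adjusted to avoid clashing with the unchecked version; the body mirrors the deleted code.

package p2pdemo

import (
	"crypto/subtle"
	"errors"

	"golang.org/x/crypto/curve25519"
)

var ErrSharedSecretIsZero = errors.New("shared secret is all zeroes")

// computeDHSecretChecked multiplies the remote public key by our private
// scalar and rejects the all-zero output that low-order points produce.
func computeDHSecretChecked(remPubKey, locPrivKey *[32]byte) (*[32]byte, error) {
	shrKey := new([32]byte)
	curve25519.ScalarMult(shrKey, locPrivKey, remPubKey)

	zero := new([32]byte)
	if subtle.ConstantTimeCompare(shrKey[:], zero[:]) == 1 {
		return nil, ErrSharedSecretIsZero
	}
	return shrKey, nil
}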
@@ -100,12 +100,8 @@ func TestSecretConnectionHandshake(t *testing.T) {
}
}

// Test that shareEphPubKey rejects lower order public keys based on an
// (incomplete) blacklist.
func TestShareLowOrderPubkey(t *testing.T) {
var fooConn, barConn = makeKVStoreConnPair()
defer fooConn.Close()
defer barConn.Close()
locEphPub, _ := genEphKeys()

// all blacklisted low order points:
@@ -130,19 +126,6 @@ func TestShareLowOrderPubkey(t *testing.T) {
}
}

// Test additionally that the Diffie-Hellman shared secret is non-zero.
// The shared secret would be zero for lower order pub-keys (but tested against the blacklist only).
func TestComputeDHFailsOnLowOrder(t *testing.T) {
_, locPrivKey := genEphKeys()
for _, remLowOrderPubKey := range blacklist {
shared, err := computeDHSecret(&remLowOrderPubKey, locPrivKey)
assert.Error(t, err)

assert.Equal(t, err, ErrSharedSecretIsZero)
assert.Empty(t, shared)
}
}

func TestConcurrentWrite(t *testing.T) {
fooSecConn, barSecConn := makeSecretConnPair(t)
fooWriteText := cmn.RandStr(dataMaxSize)
privval/client.go (new file, 240 lines)
@@ -0,0 +1,240 @@
package privval

import (
"errors"
"fmt"
"net"
"sync"
"time"

"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)

const (
defaultConnHeartBeatSeconds = 2
defaultDialRetries = 10
)

// Socket errors.
var (
ErrUnexpectedResponse = errors.New("received unexpected response")
)

var (
connHeartbeat = time.Second * defaultConnHeartBeatSeconds
)

// SocketValOption sets an optional parameter on the SocketVal.
type SocketValOption func(*SocketVal)

// SocketValHeartbeat sets the period on which to check the liveness of the
// connected Signer connections.
func SocketValHeartbeat(period time.Duration) SocketValOption {
return func(sc *SocketVal) { sc.connHeartbeat = period }
}

// SocketVal implements PrivValidator.
// It listens for an external process to dial in and uses
// the socket to request signatures.
type SocketVal struct {
cmn.BaseService

listener net.Listener

// ping
cancelPing chan struct{}
pingTicker *time.Ticker
connHeartbeat time.Duration

// signer is mutable since it can be
// reset if the connection fails.
// failures are detected by a background
// ping routine.
// All messages are request/response, so we hold the mutex
// so only one request/response pair can happen at a time.
// Methods on the underlying net.Conn itself
// are already goroutine safe.
mtx sync.Mutex
signer *RemoteSignerClient
}

// Check that SocketVal implements PrivValidator.
var _ types.PrivValidator = (*SocketVal)(nil)

// NewSocketVal returns an instance of SocketVal.
func NewSocketVal(
logger log.Logger,
listener net.Listener,
) *SocketVal {
sc := &SocketVal{
listener: listener,
connHeartbeat: connHeartbeat,
}

sc.BaseService = *cmn.NewBaseService(logger, "SocketVal", sc)

return sc
}

//--------------------------------------------------------
// Implement PrivValidator

// GetPubKey implements PrivValidator.
func (sc *SocketVal) GetPubKey() crypto.PubKey {
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.GetPubKey()
}

// SignVote implements PrivValidator.
func (sc *SocketVal) SignVote(chainID string, vote *types.Vote) error {
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.SignVote(chainID, vote)
}

// SignProposal implements PrivValidator.
func (sc *SocketVal) SignProposal(chainID string, proposal *types.Proposal) error {
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.SignProposal(chainID, proposal)
}

//--------------------------------------------------------
// More thread safe methods proxied to the signer

// Ping is used to check connection health.
func (sc *SocketVal) Ping() error {
sc.mtx.Lock()
defer sc.mtx.Unlock()
return sc.signer.Ping()
}

// Close closes the underlying net.Conn.
func (sc *SocketVal) Close() {
sc.mtx.Lock()
defer sc.mtx.Unlock()
if sc.signer != nil {
if err := sc.signer.Close(); err != nil {
sc.Logger.Error("OnStop", "err", err)
}
}

if sc.listener != nil {
if err := sc.listener.Close(); err != nil {
sc.Logger.Error("OnStop", "err", err)
}
}
}

//--------------------------------------------------------
// Service start and stop

// OnStart implements cmn.Service.
func (sc *SocketVal) OnStart() error {
if closed, err := sc.reset(); err != nil {
sc.Logger.Error("OnStart", "err", err)
return err
} else if closed {
return fmt.Errorf("listener is closed")
}

// Start a routine to keep the connection alive
sc.cancelPing = make(chan struct{}, 1)
sc.pingTicker = time.NewTicker(sc.connHeartbeat)
go func() {
for {
select {
case <-sc.pingTicker.C:
err := sc.Ping()
if err != nil {
sc.Logger.Error("Ping", "err", err)
if err == ErrUnexpectedResponse {
return
}

closed, err := sc.reset()
if err != nil {
sc.Logger.Error("Reconnecting to remote signer failed", "err", err)
continue
}
if closed {
sc.Logger.Info("listener is closing")
return
}

sc.Logger.Info("Re-created connection to remote signer", "impl", sc)
}
case <-sc.cancelPing:
sc.pingTicker.Stop()
return
}
}
}()

return nil
}

// OnStop implements cmn.Service.
func (sc *SocketVal) OnStop() {
if sc.cancelPing != nil {
close(sc.cancelPing)
}
sc.Close()
}

//--------------------------------------------------------
// Connection and signer management

// waits to accept and sets a new connection.
// connection is closed in OnStop.
// returns true if the listener is closed
// (ie. it returns a nil conn).
func (sc *SocketVal) reset() (closed bool, err error) {
sc.mtx.Lock()
defer sc.mtx.Unlock()

// first check if the conn already exists and close it.
if sc.signer != nil {
if err := sc.signer.Close(); err != nil {
sc.Logger.Error("error closing socket val connection during reset", "err", err)
}
}

// wait for a new conn
conn, err := sc.acceptConnection()
if err != nil {
return false, err
}

// listener is closed
if conn == nil {
return true, nil
}

sc.signer, err = NewRemoteSignerClient(conn)
if err != nil {
// failed to fetch the pubkey. close out the connection.
if err := conn.Close(); err != nil {
sc.Logger.Error("error closing connection", "err", err)
}
return false, err
}
return false, nil
}

// Attempt to accept a connection.
// Times out after the listener's acceptDeadline
func (sc *SocketVal) acceptConnection() (net.Conn, error) {
conn, err := sc.listener.Accept()
if err != nil {
if !sc.IsRunning() {
return nil, nil // Ignore error from listener closing.
}
return nil, err
}
return conn, nil
}
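A minimal sketch of wiring up the SocketVal defined above: open a listener, wrap it as the tests below do for the TCP case, and start the service. The address and logger choice are illustrative, not taken from the diff.

package main

import (
	"net"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
)

func main() {
	// Illustrative address; a real node reads this from its config.
	ln, err := net.Listen("tcp", "127.0.0.1:26659")
	if err != nil {
		panic(err)
	}
	// Secure the raw listener, as the tests below do for TCP.
	tcpLn := privval.NewTCPListener(ln, ed25519.GenPrivKey())

	pv := privval.NewSocketVal(log.NewNopLogger(), tcpLn)
	// Start blocks until a remote signer dials in and the pubkey is fetched.
	if err := pv.Start(); err != nil {
		panic(err)
	}
	defer pv.Stop()
}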
privval/client_test.go (new file, 461 lines)
@@ -0,0 +1,461 @@
package privval

import (
"fmt"
"net"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"

"github.com/tendermint/tendermint/types"
)

var (
testAcceptDeadline = defaultAcceptDeadlineSeconds * time.Second

testConnDeadline = 100 * time.Millisecond
testConnDeadline2o3 = 66 * time.Millisecond // 2/3 of the other one

testHeartbeatTimeout = 10 * time.Millisecond
testHeartbeatTimeout3o2 = 6 * time.Millisecond // 3/2 of the other one
)

type socketTestCase struct {
addr string
dialer Dialer
}

func socketTestCases(t *testing.T) []socketTestCase {
tcpAddr := fmt.Sprintf("tcp://%s", testFreeTCPAddr(t))
unixFilePath, err := testUnixAddr()
require.NoError(t, err)
unixAddr := fmt.Sprintf("unix://%s", unixFilePath)
return []socketTestCase{
{
addr: tcpAddr,
dialer: DialTCPFn(tcpAddr, testConnDeadline, ed25519.GenPrivKey()),
},
{
addr: unixAddr,
dialer: DialUnixFn(unixFilePath),
},
}
}

func TestSocketPVAddress(t *testing.T) {
for _, tc := range socketTestCases(t) {
// Execute the test within a closure to ensure the deferred statements
// are called between each for loop iteration, for isolated test cases.
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)
)
defer sc.Stop()
defer rs.Stop()

serverAddr := rs.privVal.GetPubKey().Address()
clientAddr := sc.GetPubKey().Address()

assert.Equal(t, serverAddr, clientAddr)
}()
}
}

func TestSocketPVPubKey(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)
)
defer sc.Stop()
defer rs.Stop()

clientKey := sc.GetPubKey()

privvalPubKey := rs.privVal.GetPubKey()

assert.Equal(t, privvalPubKey, clientKey)
}()
}
}

func TestSocketPVProposal(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)

ts = time.Now()
privProposal = &types.Proposal{Timestamp: ts}
clientProposal = &types.Proposal{Timestamp: ts}
)
defer sc.Stop()
defer rs.Stop()

require.NoError(t, rs.privVal.SignProposal(chainID, privProposal))
require.NoError(t, sc.SignProposal(chainID, clientProposal))
assert.Equal(t, privProposal.Signature, clientProposal.Signature)
}()
}
}

func TestSocketPVVote(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)

ts = time.Now()
vType = types.PrecommitType
want = &types.Vote{Timestamp: ts, Type: vType}
have = &types.Vote{Timestamp: ts, Type: vType}
)
defer sc.Stop()
defer rs.Stop()

require.NoError(t, rs.privVal.SignVote(chainID, want))
require.NoError(t, sc.SignVote(chainID, have))
assert.Equal(t, want.Signature, have.Signature)
}()
}
}

func TestSocketPVVoteResetDeadline(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)

ts = time.Now()
vType = types.PrecommitType
want = &types.Vote{Timestamp: ts, Type: vType}
have = &types.Vote{Timestamp: ts, Type: vType}
)
defer sc.Stop()
defer rs.Stop()

time.Sleep(testConnDeadline2o3)

require.NoError(t, rs.privVal.SignVote(chainID, want))
require.NoError(t, sc.SignVote(chainID, have))
assert.Equal(t, want.Signature, have.Signature)

// This would exceed the deadline if it was not extended by the previous message
time.Sleep(testConnDeadline2o3)

require.NoError(t, rs.privVal.SignVote(chainID, want))
require.NoError(t, sc.SignVote(chainID, have))
assert.Equal(t, want.Signature, have.Signature)
}()
}
}

func TestSocketPVVoteKeepalive(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)

ts = time.Now()
vType = types.PrecommitType
want = &types.Vote{Timestamp: ts, Type: vType}
have = &types.Vote{Timestamp: ts, Type: vType}
)
defer sc.Stop()
defer rs.Stop()

time.Sleep(testConnDeadline * 2)

require.NoError(t, rs.privVal.SignVote(chainID, want))
require.NoError(t, sc.SignVote(chainID, have))
assert.Equal(t, want.Signature, have.Signature)
}()
}
}

func TestSocketPVDeadline(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
listenc = make(chan struct{})
thisConnTimeout = 100 * time.Millisecond
sc = newSocketVal(log.TestingLogger(), tc.addr, thisConnTimeout)
)

go func(sc *SocketVal) {
defer close(listenc)

// Note: the TCP connection times out at the accept() phase,
// whereas the Unix domain sockets connection times out while
// attempting to fetch the remote signer's public key.
assert.True(t, IsConnTimeout(sc.Start()))

assert.False(t, sc.IsRunning())
}(sc)

for {
_, err := cmn.Connect(tc.addr)
if err == nil {
break
}
}

<-listenc
}()
}
}

func TestRemoteSignVoteErrors(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV(), tc.addr, tc.dialer)

ts = time.Now()
vType = types.PrecommitType
vote = &types.Vote{Timestamp: ts, Type: vType}
)
defer sc.Stop()
defer rs.Stop()

err := sc.SignVote("", vote)
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())

err = rs.privVal.SignVote(chainID, vote)
require.Error(t, err)
err = sc.SignVote(chainID, vote)
require.Error(t, err)
}()
}
}

func TestRemoteSignProposalErrors(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV(), tc.addr, tc.dialer)

ts = time.Now()
proposal = &types.Proposal{Timestamp: ts}
)
defer sc.Stop()
defer rs.Stop()

err := sc.SignProposal("", proposal)
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())

err = rs.privVal.SignProposal(chainID, proposal)
require.Error(t, err)

err = sc.SignProposal(chainID, proposal)
require.Error(t, err)
}()
}
}

func TestErrUnexpectedResponse(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
logger = log.TestingLogger()
chainID = cmn.RandStr(12)
readyc = make(chan struct{})
errc = make(chan error, 1)

rs = NewRemoteSigner(
logger,
chainID,
types.NewMockPV(),
tc.dialer,
)
sc = newSocketVal(logger, tc.addr, testConnDeadline)
)

testStartSocketPV(t, readyc, sc)
defer sc.Stop()
RemoteSignerConnDeadline(time.Millisecond)(rs)
RemoteSignerConnRetries(100)(rs)
// we do not want to Start() the remote signer here and instead use the connection to
// reply with intentionally wrong replies below:
rsConn, err := rs.connect()
defer rsConn.Close()
require.NoError(t, err)
require.NotNil(t, rsConn)
// send over public key to get the remote signer running:
go testReadWriteResponse(t, &PubKeyResponse{}, rsConn)
<-readyc

// Proposal:
go func(errc chan error) {
errc <- sc.SignProposal(chainID, &types.Proposal{})
}(errc)
// read request and write wrong response:
go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn)
err = <-errc
require.Error(t, err)
require.Equal(t, err, ErrUnexpectedResponse)

// Vote:
go func(errc chan error) {
errc <- sc.SignVote(chainID, &types.Vote{})
}(errc)
// read request and write wrong response:
go testReadWriteResponse(t, &SignedProposalResponse{}, rsConn)
err = <-errc
require.Error(t, err)
require.Equal(t, err, ErrUnexpectedResponse)
}()
}
}

func TestRetryConnToRemoteSigner(t *testing.T) {
for _, tc := range socketTestCases(t) {
func() {
var (
logger = log.TestingLogger()
chainID = cmn.RandStr(12)
readyc = make(chan struct{})

rs = NewRemoteSigner(
logger,
chainID,
types.NewMockPV(),
tc.dialer,
)
thisConnTimeout = testConnDeadline
sc = newSocketVal(logger, tc.addr, thisConnTimeout)
)
// Ping every:
SocketValHeartbeat(testHeartbeatTimeout)(sc)

RemoteSignerConnDeadline(testConnDeadline)(rs)
RemoteSignerConnRetries(10)(rs)

testStartSocketPV(t, readyc, sc)
defer sc.Stop()
require.NoError(t, rs.Start())
assert.True(t, rs.IsRunning())

<-readyc
time.Sleep(testHeartbeatTimeout * 2)

rs.Stop()
rs2 := NewRemoteSigner(
logger,
chainID,
types.NewMockPV(),
tc.dialer,
)
// let some pings pass
time.Sleep(testHeartbeatTimeout3o2)
require.NoError(t, rs2.Start())
assert.True(t, rs2.IsRunning())
defer rs2.Stop()

// give the client some time to re-establish the conn to the remote signer
// should see something like this in the logs:
//
// E[10016-01-10|17:12:46.128] Ping err="remote signer timed out"
// I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal
time.Sleep(testConnDeadline * 2)
}()
}
}

func newSocketVal(logger log.Logger, addr string, connDeadline time.Duration) *SocketVal {
proto, address := cmn.ProtocolAndAddress(addr)
ln, err := net.Listen(proto, address)
logger.Info("Listening at", "proto", proto, "address", address)
if err != nil {
panic(err)
}
var svln net.Listener
if proto == "unix" {
unixLn := NewUnixListener(ln)
UnixListenerAcceptDeadline(testAcceptDeadline)(unixLn)
UnixListenerConnDeadline(connDeadline)(unixLn)
svln = unixLn
} else {
tcpLn := NewTCPListener(ln, ed25519.GenPrivKey())
TCPListenerAcceptDeadline(testAcceptDeadline)(tcpLn)
TCPListenerConnDeadline(connDeadline)(tcpLn)
svln = tcpLn
}
return NewSocketVal(logger, svln)
}

func testSetupSocketPair(
t *testing.T,
chainID string,
privValidator types.PrivValidator,
addr string,
dialer Dialer,
) (*SocketVal, *RemoteSigner) {
var (
logger = log.TestingLogger()
privVal = privValidator
readyc = make(chan struct{})
rs = NewRemoteSigner(
logger,
chainID,
privVal,
dialer,
)

thisConnTimeout = testConnDeadline
sc = newSocketVal(logger, addr, thisConnTimeout)
)

SocketValHeartbeat(testHeartbeatTimeout)(sc)
||||
RemoteSignerConnDeadline(testConnDeadline)(rs)
|
||||
RemoteSignerConnRetries(1e6)(rs)
|
||||
|
||||
testStartSocketPV(t, readyc, sc)
|
||||
|
||||
require.NoError(t, rs.Start())
|
||||
assert.True(t, rs.IsRunning())
|
||||
|
||||
<-readyc
|
||||
|
||||
return sc, rs
|
||||
}
|
||||
|
||||
func testReadWriteResponse(t *testing.T, resp RemoteSignerMsg, rsConn net.Conn) {
|
||||
_, err := readMsg(rsConn)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = writeMsg(rsConn, resp)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *SocketVal) {
|
||||
go func(sc *SocketVal) {
|
||||
require.NoError(t, sc.Start())
|
||||
assert.True(t, sc.IsRunning())
|
||||
|
||||
readyc <- struct{}{}
|
||||
}(sc)
|
||||
}
|
||||
|
||||
// testFreeTCPAddr claims a free port so we don't block on listener being ready.
|
||||
func testFreeTCPAddr(t *testing.T) string {
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
defer ln.Close()
|
||||
|
||||
return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port)
|
||||
}
|
@@ -6,16 +6,16 @@ FilePV

FilePV is the simplest implementation and developer default. It uses one file for the private key and another to store state.

SignerValidatorEndpoint
SocketVal

SignerValidatorEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket.
SignerValidatorEndpoint listens for the external KMS process to dial in.
SignerValidatorEndpoint takes a listener, which determines the type of connection
SocketVal establishes a connection to an external process, like a Key Management Server (KMS), using a socket.
SocketVal listens for the external KMS process to dial in.
SocketVal takes a listener, which determines the type of connection
(ie. encrypted over tcp, or unencrypted over unix).

SignerServiceEndpoint
RemoteSigner

SignerServiceEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal.
RemoteSigner is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal.

*/
package privval
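For orientation, a minimal sketch of how these pieces are wired together, using only constructors that appear elsewhere in this compare (NewTCPListener, NewSocketVal, NewRemoteSigner, DialTCPFn); the address and chain ID are made-up placeholders and error handling is elided:

package main

import (
	"net"
	"time"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/types"
)

func main() {
	logger := log.TestingLogger()

	// Validator side: listen and wait for the external signer to dial in.
	ln, _ := net.Listen("tcp", "127.0.0.1:26659") // placeholder address
	sv := privval.NewSocketVal(logger, privval.NewTCPListener(ln, ed25519.GenPrivKey()))

	// Signer side: dial the validator and answer its signing requests
	// with a mock PrivValidator.
	rs := privval.NewRemoteSigner(
		logger,
		"test-chain", // placeholder chain ID
		types.NewMockPV(),
		privval.DialTCPFn("127.0.0.1:26659", 3*time.Second, ed25519.GenPrivKey()),
	)

	go rs.Start() // dials in and serves signing requests
	sv.Start()    // accepts the connection and memoizes the signer's pubkey

	// From here on, sv can be used anywhere a types.PrivValidator is expected.
	sv.SignVote("test-chain", &types.Vote{Type: types.PrecommitType})
}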
@@ -1,22 +0,0 @@
package privval

import (
	"fmt"
)

// Socket errors.
var (
	ErrUnexpectedResponse = fmt.Errorf("received unexpected response")
	ErrConnTimeout        = fmt.Errorf("remote signer timed out")
)

// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply.
type RemoteSignerError struct {
	// TODO(ismail): create an enum of known errors
	Code        int
	Description string
}

func (e *RemoteSignerError) Error() string {
	return fmt.Sprintf("signerServiceEndpoint returned error #%d: %s", e.Code, e.Description)
}
@@ -49,7 +49,7 @@ type FilePVKey struct {
func (pvKey FilePVKey) Save() {
	outFile := pvKey.filePath
	if outFile == "" {
		panic("cannot save PrivValidator key: filePath not set")
		panic("Cannot save PrivValidator key: filePath not set")
	}

	jsonBytes, err := cdc.MarshalJSONIndent(pvKey, "", "  ")
@@ -86,17 +86,17 @@ type FilePVLastSignState struct {
func (lss *FilePVLastSignState) CheckHRS(height int64, round int, step int8) (bool, error) {

	if lss.Height > height {
		return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height)
		return false, fmt.Errorf("Height regression. Got %v, last height %v", height, lss.Height)
	}

	if lss.Height == height {
		if lss.Round > round {
			return false, fmt.Errorf("round regression at height %v. Got %v, last round %v", height, round, lss.Round)
			return false, fmt.Errorf("Round regression at height %v. Got %v, last round %v", height, round, lss.Round)
		}

		if lss.Round == round {
			if lss.Step > step {
				return false, fmt.Errorf("step regression at height %v round %v. Got %v, last step %v", height, round, step, lss.Step)
				return false, fmt.Errorf("Step regression at height %v round %v. Got %v, last step %v", height, round, step, lss.Step)
			} else if lss.Step == step {
				if lss.SignBytes != nil {
					if lss.Signature == nil {
@@ -104,7 +104,7 @@ func (lss *FilePVLastSignState) CheckHRS(height int64, round int, step int8) (bo
					}
					return true, nil
				}
				return false, errors.New("no SignBytes found")
				return false, errors.New("No SignBytes found")
			}
		}
	}
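The three regression checks above are what protect a validator from double-signing. As a sketch of how the signing paths consume CheckHRS (the helper name below is illustrative, not part of this diff):

// checkSafeToSign is a hypothetical wrapper: an error from CheckHRS means
// height/round/step went backwards and the request must be refused, while
// resign == true means we are at the exact HRS we last signed and may only
// repeat the previous signature for byte-identical sign bytes.
func checkSafeToSign(lss *FilePVLastSignState, height int64, round int, step int8) (resign bool, err error) {
	sameHRS, err := lss.CheckHRS(height, round, step)
	if err != nil {
		return false, err // regression: a potential double-sign, abort
	}
	return sameHRS, nil
}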
@@ -115,7 +115,7 @@ func (lss *FilePVLastSignState) CheckHRS(height int64, round int, step int8) (bo
func (lss *FilePVLastSignState) Save() {
	outFile := lss.filePath
	if outFile == "" {
		panic("cannot save FilePVLastSignState: filePath not set")
		panic("Cannot save FilePVLastSignState: filePath not set")
	}
	jsonBytes, err := cdc.MarshalJSONIndent(lss, "", "  ")
	if err != nil {
@@ -237,7 +237,7 @@ func (pv *FilePV) GetPubKey() crypto.PubKey {
// chainID. Implements PrivValidator.
func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error {
	if err := pv.signVote(chainID, vote); err != nil {
		return fmt.Errorf("error signing vote: %v", err)
		return fmt.Errorf("Error signing vote: %v", err)
	}
	return nil
}
@@ -246,7 +246,7 @@ func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error {
// the chainID. Implements PrivValidator.
func (pv *FilePV) SignProposal(chainID string, proposal *types.Proposal) error {
	if err := pv.signProposal(chainID, proposal); err != nil {
		return fmt.Errorf("error signing proposal: %v", err)
		return fmt.Errorf("Error signing proposal: %v", err)
	}
	return nil
}
@@ -303,7 +303,7 @@ func (pv *FilePV) signVote(chainID string, vote *types.Vote) error {
		vote.Timestamp = timestamp
		vote.Signature = lss.Signature
	} else {
		err = fmt.Errorf("conflicting data")
		err = fmt.Errorf("Conflicting data")
	}
	return err
}
@@ -345,7 +345,7 @@ func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error {
		proposal.Timestamp = timestamp
		proposal.Signature = lss.Signature
	} else {
		err = fmt.Errorf("conflicting data")
		err = fmt.Errorf("Conflicting data")
	}
	return err
}
@@ -1,61 +0,0 @@
package privval

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/types"
)

// RemoteSignerMsg is sent between SignerServiceEndpoint and the SignerServiceEndpoint client.
type RemoteSignerMsg interface{}

func RegisterRemoteSignerMsg(cdc *amino.Codec) {
	cdc.RegisterInterface((*RemoteSignerMsg)(nil), nil)
	cdc.RegisterConcrete(&PubKeyRequest{}, "tendermint/remotesigner/PubKeyRequest", nil)
	cdc.RegisterConcrete(&PubKeyResponse{}, "tendermint/remotesigner/PubKeyResponse", nil)
	cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil)
	cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil)
	cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil)
	cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil)
	cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil)
	cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil)
}

// PubKeyRequest requests the consensus public key from the remote signer.
type PubKeyRequest struct{}

// PubKeyResponse is a PrivValidatorSocket message containing the public key.
type PubKeyResponse struct {
	PubKey crypto.PubKey
	Error  *RemoteSignerError
}

// SignVoteRequest is a PrivValidatorSocket message containing a vote.
type SignVoteRequest struct {
	Vote *types.Vote
}

// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potential error message.
type SignedVoteResponse struct {
	Vote  *types.Vote
	Error *RemoteSignerError
}

// SignProposalRequest is a PrivValidatorSocket message containing a Proposal.
type SignProposalRequest struct {
	Proposal *types.Proposal
}

// SignedProposalResponse is a PrivValidatorSocket message containing a proposal response
type SignedProposalResponse struct {
	Proposal *types.Proposal
	Error    *RemoteSignerError
}

// PingRequest is a PrivValidatorSocket message to keep the connection alive.
type PingRequest struct {
}

// PingResponse is a PrivValidatorSocket response to keep the connection alive.
type PingResponse struct {
}
@@ -10,7 +10,6 @@ import (
)

// OldFilePV is the old version of the FilePV, pre v0.28.0.
// Deprecated: Use FilePV instead.
type OldFilePV struct {
	Address types.Address `json:"address"`
	PubKey  crypto.PubKey `json:"pub_key"`
@@ -7,44 +7,51 @@ import (

	"github.com/pkg/errors"

	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/crypto"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/types"
)

// SignerRemote implements PrivValidator.
// It uses a net.Conn to request signatures from an external process.
type SignerRemote struct {
// Socket errors.
var (
	ErrConnTimeout = errors.New("remote signer timed out")
)

// RemoteSignerClient implements PrivValidator.
// It uses a net.Conn to request signatures
// from an external process.
type RemoteSignerClient struct {
	conn net.Conn

	// memoized
	consensusPubKey crypto.PubKey
}

// Check that SignerRemote implements PrivValidator.
var _ types.PrivValidator = (*SignerRemote)(nil)
// Check that RemoteSignerClient implements PrivValidator.
var _ types.PrivValidator = (*RemoteSignerClient)(nil)

// NewSignerRemote returns an instance of SignerRemote.
func NewSignerRemote(conn net.Conn) (*SignerRemote, error) {
// NewRemoteSignerClient returns an instance of RemoteSignerClient.
func NewRemoteSignerClient(conn net.Conn) (*RemoteSignerClient, error) {

	// retrieve and memoize the consensus public key once.
	pubKey, err := getPubKey(conn)
	if err != nil {
		return nil, cmn.ErrorWrap(err, "error while retrieving public key for remote signer")
	}
	return &SignerRemote{
	return &RemoteSignerClient{
		conn:            conn,
		consensusPubKey: pubKey,
	}, nil
}

// Close calls Close on the underlying net.Conn.
func (sc *SignerRemote) Close() error {
func (sc *RemoteSignerClient) Close() error {
	return sc.conn.Close()
}

// GetPubKey implements PrivValidator.
func (sc *SignerRemote) GetPubKey() crypto.PubKey {
func (sc *RemoteSignerClient) GetPubKey() crypto.PubKey {
	return sc.consensusPubKey
}
@@ -59,7 +66,6 @@ func getPubKey(conn net.Conn) (crypto.PubKey, error) {
	if err != nil {
		return nil, err
	}

	pubKeyResp, ok := res.(*PubKeyResponse)
	if !ok {
		return nil, errors.Wrap(ErrUnexpectedResponse, "response is not PubKeyResponse")
@@ -73,7 +79,7 @@ func getPubKey(conn net.Conn) (crypto.PubKey, error) {
}

// SignVote implements PrivValidator.
func (sc *SignerRemote) SignVote(chainID string, vote *types.Vote) error {
func (sc *RemoteSignerClient) SignVote(chainID string, vote *types.Vote) error {
	err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote})
	if err != nil {
		return err
@@ -97,7 +103,10 @@ func (sc *SignerRemote) SignVote(chainID string, vote *types.Vote) error {
}

// SignProposal implements PrivValidator.
func (sc *SignerRemote) SignProposal(chainID string, proposal *types.Proposal) error {
func (sc *RemoteSignerClient) SignProposal(
	chainID string,
	proposal *types.Proposal,
) error {
	err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal})
	if err != nil {
		return err
@@ -120,7 +129,7 @@ func (sc *SignerRemote) SignProposal(chainID string, proposal *types.Proposal) e
}

// Ping is used to check connection health.
func (sc *SignerRemote) Ping() error {
func (sc *RemoteSignerClient) Ping() error {
	err := writeMsg(sc.conn, &PingRequest{})
	if err != nil {
		return err
@@ -138,6 +147,69 @@ func (sc *SignerRemote) Ping() error {
	return nil
}

// RemoteSignerMsg is sent between RemoteSigner and the RemoteSigner client.
type RemoteSignerMsg interface{}

func RegisterRemoteSignerMsg(cdc *amino.Codec) {
	cdc.RegisterInterface((*RemoteSignerMsg)(nil), nil)
	cdc.RegisterConcrete(&PubKeyRequest{}, "tendermint/remotesigner/PubKeyRequest", nil)
	cdc.RegisterConcrete(&PubKeyResponse{}, "tendermint/remotesigner/PubKeyResponse", nil)
	cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil)
	cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil)
	cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil)
	cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil)
	cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil)
	cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil)
}
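The readMsg and writeMsg helpers below rely on a package-level cdc; a sketch of how that codec is presumably assembled elsewhere in the package (the crypto registration call is an assumption, modeled on how other tendermint packages set up amino):

import (
	amino "github.com/tendermint/go-amino"
	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)

var cdc = amino.NewCodec()

func init() {
	cryptoAmino.RegisterAmino(cdc) // lets PubKeyResponse's crypto.PubKey round-trip
	RegisterRemoteSignerMsg(cdc)
}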
// PubKeyRequest requests the consensus public key from the remote signer.
type PubKeyRequest struct{}

// PubKeyResponse is a PrivValidatorSocket message containing the public key.
type PubKeyResponse struct {
	PubKey crypto.PubKey
	Error  *RemoteSignerError
}

// SignVoteRequest is a PrivValidatorSocket message containing a vote.
type SignVoteRequest struct {
	Vote *types.Vote
}

// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potential error message.
type SignedVoteResponse struct {
	Vote  *types.Vote
	Error *RemoteSignerError
}

// SignProposalRequest is a PrivValidatorSocket message containing a Proposal.
type SignProposalRequest struct {
	Proposal *types.Proposal
}

// SignedProposalResponse is a PrivValidatorSocket message containing a proposal response.
type SignedProposalResponse struct {
	Proposal *types.Proposal
	Error    *RemoteSignerError
}

// PingRequest is a PrivValidatorSocket message to keep the connection alive.
type PingRequest struct {
}

// PingResponse is a PrivValidatorSocket response to keep the connection alive.
type PingResponse struct {
}

// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply.
type RemoteSignerError struct {
	// TODO(ismail): create an enum of known errors
	Code        int
	Description string
}

func (e *RemoteSignerError) Error() string {
	return fmt.Sprintf("RemoteSigner returned error #%d: %s", e.Code, e.Description)
}

func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) {
	const maxRemoteSignerMsgSize = 1024 * 10
	_, err = cdc.UnmarshalBinaryLengthPrefixedReader(r, &msg, maxRemoteSignerMsgSize)
@@ -164,7 +236,6 @@ func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValida
		var p crypto.PubKey
		p = privVal.GetPubKey()
		res = &PubKeyResponse{p, nil}

	case *SignVoteRequest:
		err = privVal.SignVote(chainID, r.Vote)
		if err != nil {
@@ -172,7 +243,6 @@ func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValida
		} else {
			res = &SignedVoteResponse{r.Vote, nil}
		}

	case *SignProposalRequest:
		err = privVal.SignProposal(chainID, r.Proposal)
		if err != nil {
@@ -180,13 +250,26 @@ func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValida
		} else {
			res = &SignedProposalResponse{r.Proposal, nil}
		}

	case *PingRequest:
		res = &PingResponse{}

	default:
		err = fmt.Errorf("unknown msg: %v", r)
	}

	return res, err
}

// IsConnTimeout returns a boolean indicating whether the error is known to
// report that a connection timeout occurred. This detects both fundamental
// network timeouts, as well as ErrConnTimeout errors.
func IsConnTimeout(err error) bool {
	if cmnErr, ok := err.(cmn.Error); ok {
		if cmnErr.Data() == ErrConnTimeout {
			return true
		}
	}
	if _, ok := err.(timeoutError); ok {
		return true
	}
	return false
}
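A small usage sketch (mirroring the IsConnTimeout tests added later in this compare): both a raw network timeout and a cmn-wrapped ErrConnTimeout classify the same way, so callers can branch on IsConnTimeout alone. The helper below is illustrative only, not part of this diff:

// dialWithRetryHint is a hypothetical helper: timeout-shaped errors are
// surfaced as the sentinel ErrConnTimeout so the caller knows a retry may
// succeed, while anything else is returned as a hard failure.
func dialWithRetryHint(dial Dialer) (net.Conn, error) {
	conn, err := dial()
	if err != nil {
		if IsConnTimeout(err) {
			return nil, ErrConnTimeout // transient: the caller may retry
		}
		return nil, err // permanent: give up immediately
	}
	return conn, nil
}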
privval/remote_signer_test.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package privval

import (
	"net"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/crypto/ed25519"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// TestRemoteSignerRetryTCPOnly will test connection retry attempts over TCP. We
// don't need this for Unix sockets because the OS instantly knows the state of
// both ends of the socket connection. This basically causes the
// RemoteSigner.dialer() call inside RemoteSigner.connect() to return
// successfully immediately, putting an instant stop to any retry attempts.
func TestRemoteSignerRetryTCPOnly(t *testing.T) {
	var (
		attemptc = make(chan int)
		retries  = 2
	)

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)

	go func(ln net.Listener, attemptc chan<- int) {
		attempts := 0

		for {
			conn, err := ln.Accept()
			require.NoError(t, err)

			err = conn.Close()
			require.NoError(t, err)

			attempts++

			if attempts == retries {
				attemptc <- attempts
				break
			}
		}
	}(ln, attemptc)

	rs := NewRemoteSigner(
		log.TestingLogger(),
		cmn.RandStr(12),
		types.NewMockPV(),
		DialTCPFn(ln.Addr().String(), testConnDeadline, ed25519.GenPrivKey()),
	)
	defer rs.Stop()

	RemoteSignerConnDeadline(time.Millisecond)(rs)
	RemoteSignerConnRetries(retries)(rs)

	assert.Equal(t, rs.Start(), ErrDialRetryMax)

	select {
	case attempts := <-attemptc:
		assert.Equal(t, retries, attempts)
	case <-time.After(100 * time.Millisecond):
		t.Error("expected remote to observe connection attempts")
	}
}

func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) {
	// Generate a networking timeout
	dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey())
	_, err := dialer()
	assert.Error(t, err)
	assert.True(t, IsConnTimeout(err))
}

func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) {
	dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey())
	_, err := dialer()
	assert.Error(t, err)
	err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
	assert.True(t, IsConnTimeout(err))
}

func TestIsConnTimeoutForNonTimeoutErrors(t *testing.T) {
	assert.False(t, IsConnTimeout(cmn.ErrorWrap(ErrDialRetryMax, "max retries exceeded")))
	assert.False(t, IsConnTimeout(errors.New("completely irrelevant error")))
}
privval/server.go (new file, 168 lines)
@@ -0,0 +1,168 @@
package privval

import (
	"io"
	"net"
	"time"

	"github.com/pkg/errors"
	"github.com/tendermint/tendermint/crypto/ed25519"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	p2pconn "github.com/tendermint/tendermint/p2p/conn"
	"github.com/tendermint/tendermint/types"
)

// Socket errors.
var (
	ErrDialRetryMax = errors.New("dialed maximum retries")
)

// RemoteSignerOption sets an optional parameter on the RemoteSigner.
type RemoteSignerOption func(*RemoteSigner)

// RemoteSignerConnDeadline sets the read and write deadline for connections
// from external signing processes.
func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption {
	return func(ss *RemoteSigner) { ss.connDeadline = deadline }
}

// RemoteSignerConnRetries sets the amount of attempted retries to connect.
func RemoteSignerConnRetries(retries int) RemoteSignerOption {
	return func(ss *RemoteSigner) { ss.connRetries = retries }
}

// RemoteSigner dials using its dialer and responds to any
// signature requests using its privVal.
type RemoteSigner struct {
	cmn.BaseService

	chainID      string
	connDeadline time.Duration
	connRetries  int
	privVal      types.PrivValidator

	dialer Dialer
	conn   net.Conn
}

// Dialer dials a remote address and returns a net.Conn or an error.
type Dialer func() (net.Conn, error)

// DialTCPFn dials the given tcp addr, using the given connTimeout and privKey for the
// authenticated encryption handshake.
func DialTCPFn(addr string, connTimeout time.Duration, privKey ed25519.PrivKeyEd25519) Dialer {
	return func() (net.Conn, error) {
		conn, err := cmn.Connect(addr)
		if err == nil {
			err = conn.SetDeadline(time.Now().Add(connTimeout))
		}
		if err == nil {
			conn, err = p2pconn.MakeSecretConnection(conn, privKey)
		}
		return conn, err
	}
}

// DialUnixFn dials the given unix socket.
func DialUnixFn(addr string) Dialer {
	return func() (net.Conn, error) {
		unixAddr := &net.UnixAddr{Name: addr, Net: "unix"}
		return net.DialUnix("unix", nil, unixAddr)
	}
}
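Both constructors return the same Dialer type, so either transport can be handed to NewRemoteSigner unchanged. A short sketch with placeholder addresses (the address, socket path, and chain ID below are assumptions for illustration):

func exampleDialers() *RemoteSigner {
	// Encrypted TCP towards a validator (placeholder address, ephemeral key):
	tcpDialer := DialTCPFn("127.0.0.1:26659", 3*time.Second, ed25519.GenPrivKey())

	// Unencrypted Unix domain socket (placeholder path):
	unixDialer := DialUnixFn("/tmp/validator.sock")
	_ = unixDialer // interchangeable: NewRemoteSigner(..., unixDialer) works the same

	return NewRemoteSigner(log.TestingLogger(), "test-chain", types.NewMockPV(), tcpDialer)
}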
// NewRemoteSigner returns a RemoteSigner that will dial using the given
// dialer and respond to any signature requests over the connection
// using the given privVal.
func NewRemoteSigner(
	logger log.Logger,
	chainID string,
	privVal types.PrivValidator,
	dialer Dialer,
) *RemoteSigner {
	rs := &RemoteSigner{
		chainID:      chainID,
		connDeadline: time.Second * defaultConnDeadlineSeconds,
		connRetries:  defaultDialRetries,
		privVal:      privVal,
		dialer:       dialer,
	}

	rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs)
	return rs
}

// OnStart implements cmn.Service.
func (rs *RemoteSigner) OnStart() error {
	conn, err := rs.connect()
	if err != nil {
		rs.Logger.Error("OnStart", "err", err)
		return err
	}
	rs.conn = conn

	go rs.handleConnection(conn)

	return nil
}

// OnStop implements cmn.Service.
func (rs *RemoteSigner) OnStop() {
	if rs.conn == nil {
		return
	}

	if err := rs.conn.Close(); err != nil {
		rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed"))
	}
}

func (rs *RemoteSigner) connect() (net.Conn, error) {
	for retries := rs.connRetries; retries > 0; retries-- {
		// Don't sleep if it is the first retry.
		if retries != rs.connRetries {
			time.Sleep(rs.connDeadline)
		}
		conn, err := rs.dialer()
		if err != nil {
			rs.Logger.Error("dialing", "err", err)
			continue
		}
		return conn, nil
	}

	return nil, ErrDialRetryMax
}

func (rs *RemoteSigner) handleConnection(conn net.Conn) {
	for {
		if !rs.IsRunning() {
			return // Ignore error from listener closing.
		}

		// Reset the connection deadline
		conn.SetDeadline(time.Now().Add(rs.connDeadline))

		req, err := readMsg(conn)
		if err != nil {
			if err != io.EOF {
				rs.Logger.Error("handleConnection readMsg", "err", err)
			}
			return
		}

		res, err := handleRequest(req, rs.chainID, rs.privVal)

		if err != nil {
			// only log the error; we'll reply with an error in res
			rs.Logger.Error("handleConnection handleRequest", "err", err)
		}

		err = writeMsg(conn, res)
		if err != nil {
			rs.Logger.Error("handleConnection writeMsg", "err", err)
			return
		}
	}
}
@@ -1,68 +0,0 @@
package privval

import (
	"net"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/crypto/ed25519"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// TestSignerRemoteRetryTCPOnly will test connection retry attempts over TCP. We
// don't need this for Unix sockets because the OS instantly knows the state of
// both ends of the socket connection. This basically causes the
// SignerServiceEndpoint.dialer() call inside SignerServiceEndpoint.connect() to return
// successfully immediately, putting an instant stop to any retry attempts.
func TestSignerRemoteRetryTCPOnly(t *testing.T) {
	var (
		attemptCh = make(chan int)
		retries   = 2
	)

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)

	go func(ln net.Listener, attemptCh chan<- int) {
		attempts := 0

		for {
			conn, err := ln.Accept()
			require.NoError(t, err)

			err = conn.Close()
			require.NoError(t, err)

			attempts++

			if attempts == retries {
				attemptCh <- attempts
				break
			}
		}
	}(ln, attemptCh)

	serviceEndpoint := NewSignerServiceEndpoint(
		log.TestingLogger(),
		cmn.RandStr(12),
		types.NewMockPV(),
		DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()),
	)
	defer serviceEndpoint.Stop()

	SignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint)
	SignerServiceEndpointConnRetries(retries)(serviceEndpoint)

	assert.Equal(t, serviceEndpoint.Start(), ErrDialRetryMax)

	select {
	case attempts := <-attemptCh:
		assert.Equal(t, retries, attempts)
	case <-time.After(100 * time.Millisecond):
		t.Error("expected remote to observe connection attempts")
	}
}
@@ -1,139 +0,0 @@
package privval

import (
	"io"
	"net"
	"time"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// SignerServiceEndpointOption sets an optional parameter on the SignerServiceEndpoint.
type SignerServiceEndpointOption func(*SignerServiceEndpoint)

// SignerServiceEndpointTimeoutReadWrite sets the read and write timeout for connections
// from external signing processes.
func SignerServiceEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption {
	return func(ss *SignerServiceEndpoint) { ss.timeoutReadWrite = timeout }
}

// SignerServiceEndpointConnRetries sets the amount of attempted retries to connect.
func SignerServiceEndpointConnRetries(retries int) SignerServiceEndpointOption {
	return func(ss *SignerServiceEndpoint) { ss.connRetries = retries }
}

// SignerServiceEndpoint dials using its dialer and responds to any
// signature requests using its privVal.
type SignerServiceEndpoint struct {
	cmn.BaseService

	chainID          string
	timeoutReadWrite time.Duration
	connRetries      int
	privVal          types.PrivValidator

	dialer SocketDialer
	conn   net.Conn
}

// NewSignerServiceEndpoint returns a SignerServiceEndpoint that will dial using the given
// dialer and respond to any signature requests over the connection
// using the given privVal.
func NewSignerServiceEndpoint(
	logger log.Logger,
	chainID string,
	privVal types.PrivValidator,
	dialer SocketDialer,
) *SignerServiceEndpoint {
	se := &SignerServiceEndpoint{
		chainID:          chainID,
		timeoutReadWrite: time.Second * defaultTimeoutReadWriteSeconds,
		connRetries:      defaultMaxDialRetries,
		privVal:          privVal,
		dialer:           dialer,
	}

	se.BaseService = *cmn.NewBaseService(logger, "SignerServiceEndpoint", se)
	return se
}

// OnStart implements cmn.Service.
func (se *SignerServiceEndpoint) OnStart() error {
	conn, err := se.connect()
	if err != nil {
		se.Logger.Error("OnStart", "err", err)
		return err
	}

	se.conn = conn
	go se.handleConnection(conn)

	return nil
}

// OnStop implements cmn.Service.
func (se *SignerServiceEndpoint) OnStop() {
	if se.conn == nil {
		return
	}

	if err := se.conn.Close(); err != nil {
		se.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed"))
	}
}

func (se *SignerServiceEndpoint) connect() (net.Conn, error) {
	for retries := 0; retries < se.connRetries; retries++ {
		// Don't sleep if it is the first retry.
		if retries > 0 {
			time.Sleep(se.timeoutReadWrite)
		}

		conn, err := se.dialer()
		if err == nil {
			return conn, nil
		}

		se.Logger.Error("dialing", "err", err)
	}

	return nil, ErrDialRetryMax
}

func (se *SignerServiceEndpoint) handleConnection(conn net.Conn) {
	for {
		if !se.IsRunning() {
			return // Ignore error from listener closing.
		}

		// Reset the connection deadline
		deadline := time.Now().Add(se.timeoutReadWrite)
		err := conn.SetDeadline(deadline)
		if err != nil {
			return
		}

		req, err := readMsg(conn)
		if err != nil {
			if err != io.EOF {
				se.Logger.Error("handleConnection readMsg", "err", err)
			}
			return
		}

		res, err := handleRequest(req, se.chainID, se.privVal)

		if err != nil {
			// only log the error; we'll reply with an error in res
			se.Logger.Error("handleConnection handleRequest", "err", err)
		}

		err = writeMsg(conn, res)
		if err != nil {
			se.Logger.Error("handleConnection writeMsg", "err", err)
			return
		}
	}
}
@@ -1,230 +0,0 @@
package privval

import (
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/tendermint/tendermint/crypto"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

const (
	defaultHeartbeatSeconds = 2
	defaultMaxDialRetries   = 10
)

var (
	heartbeatPeriod = time.Second * defaultHeartbeatSeconds
)

// SignerValidatorEndpointOption sets an optional parameter on the SocketVal.
type SignerValidatorEndpointOption func(*SignerValidatorEndpoint)

// SignerValidatorEndpointSetHeartbeat sets the period on which to check the liveness of the
// connected Signer connections.
func SignerValidatorEndpointSetHeartbeat(period time.Duration) SignerValidatorEndpointOption {
	return func(sc *SignerValidatorEndpoint) { sc.heartbeatPeriod = period }
}

// SocketVal implements PrivValidator.
// It listens for an external process to dial in and uses
// the socket to request signatures.
type SignerValidatorEndpoint struct {
	cmn.BaseService

	listener net.Listener

	// ping
	cancelPingCh    chan struct{}
	pingTicker      *time.Ticker
	heartbeatPeriod time.Duration

	// signer is mutable since it can be reset if the connection fails.
	// failures are detected by a background ping routine.
	// All messages are request/response, so we hold the mutex
	// so only one request/response pair can happen at a time.
	// Methods on the underlying net.Conn itself are already goroutine safe.
	mtx sync.Mutex

	// TODO: Signer should encapsulate and hide the endpoint completely. Invert the relation
	signer *SignerRemote
}

// Check that SignerValidatorEndpoint implements PrivValidator.
var _ types.PrivValidator = (*SignerValidatorEndpoint)(nil)

// NewSignerValidatorEndpoint returns an instance of SignerValidatorEndpoint.
func NewSignerValidatorEndpoint(logger log.Logger, listener net.Listener) *SignerValidatorEndpoint {
	sc := &SignerValidatorEndpoint{
		listener:        listener,
		heartbeatPeriod: heartbeatPeriod,
	}

	sc.BaseService = *cmn.NewBaseService(logger, "SignerValidatorEndpoint", sc)

	return sc
}

//--------------------------------------------------------
// Implement PrivValidator

// GetPubKey implements PrivValidator.
func (ve *SignerValidatorEndpoint) GetPubKey() crypto.PubKey {
	ve.mtx.Lock()
	defer ve.mtx.Unlock()
	return ve.signer.GetPubKey()
}

// SignVote implements PrivValidator.
func (ve *SignerValidatorEndpoint) SignVote(chainID string, vote *types.Vote) error {
	ve.mtx.Lock()
	defer ve.mtx.Unlock()
	return ve.signer.SignVote(chainID, vote)
}

// SignProposal implements PrivValidator.
func (ve *SignerValidatorEndpoint) SignProposal(chainID string, proposal *types.Proposal) error {
	ve.mtx.Lock()
	defer ve.mtx.Unlock()
	return ve.signer.SignProposal(chainID, proposal)
}

//--------------------------------------------------------
// More thread safe methods proxied to the signer

// Ping is used to check connection health.
func (ve *SignerValidatorEndpoint) Ping() error {
	ve.mtx.Lock()
	defer ve.mtx.Unlock()
	return ve.signer.Ping()
}

// Close closes the underlying net.Conn.
func (ve *SignerValidatorEndpoint) Close() {
	ve.mtx.Lock()
	defer ve.mtx.Unlock()
	if ve.signer != nil {
		if err := ve.signer.Close(); err != nil {
			ve.Logger.Error("OnStop", "err", err)
		}
	}

	if ve.listener != nil {
		if err := ve.listener.Close(); err != nil {
			ve.Logger.Error("OnStop", "err", err)
		}
	}
}

//--------------------------------------------------------
// Service start and stop

// OnStart implements cmn.Service.
func (ve *SignerValidatorEndpoint) OnStart() error {
	if closed, err := ve.reset(); err != nil {
		ve.Logger.Error("OnStart", "err", err)
		return err
	} else if closed {
		return fmt.Errorf("listener is closed")
	}

	// Start a routine to keep the connection alive
	ve.cancelPingCh = make(chan struct{}, 1)
	ve.pingTicker = time.NewTicker(ve.heartbeatPeriod)
	go func() {
		for {
			select {
			case <-ve.pingTicker.C:
				err := ve.Ping()
				if err != nil {
					ve.Logger.Error("Ping", "err", err)
					if err == ErrUnexpectedResponse {
						return
					}

					closed, err := ve.reset()
					if err != nil {
						ve.Logger.Error("Reconnecting to remote signer failed", "err", err)
						continue
					}
					if closed {
						ve.Logger.Info("listener is closing")
						return
					}

					ve.Logger.Info("Re-created connection to remote signer", "impl", ve)
				}
			case <-ve.cancelPingCh:
				ve.pingTicker.Stop()
				return
			}
		}
	}()

	return nil
}

// OnStop implements cmn.Service.
func (ve *SignerValidatorEndpoint) OnStop() {
	if ve.cancelPingCh != nil {
		close(ve.cancelPingCh)
	}
	ve.Close()
}

//--------------------------------------------------------
// Connection and signer management

// waits to accept and sets a new connection.
// connection is closed in OnStop.
// returns true if the listener is closed
// (ie. it returns a nil conn).
func (ve *SignerValidatorEndpoint) reset() (closed bool, err error) {
	ve.mtx.Lock()
	defer ve.mtx.Unlock()

	// first check if the conn already exists and close it.
	if ve.signer != nil {
		if tmpErr := ve.signer.Close(); tmpErr != nil {
			ve.Logger.Error("error closing socket val connection during reset", "err", tmpErr)
		}
	}

	// wait for a new conn
	conn, err := ve.acceptConnection()
	if err != nil {
		return false, err
	}

	// listener is closed
	if conn == nil {
		return true, nil
	}

	ve.signer, err = NewSignerRemote(conn)
	if err != nil {
		// failed to fetch the pubkey. close out the connection.
		if tmpErr := conn.Close(); tmpErr != nil {
			ve.Logger.Error("error closing connection", "err", tmpErr)
		}
		return false, err
	}
	return false, nil
}

// Attempt to accept a connection.
// Times out after the listener's timeoutAccept
func (ve *SignerValidatorEndpoint) acceptConnection() (net.Conn, error) {
	conn, err := ve.listener.Accept()
	if err != nil {
		if !ve.IsRunning() {
			return nil, nil // Ignore error from listener closing.
		}
		return nil, err
	}
	return conn, nil
}
@@ -1,505 +0,0 @@
|
||||
package privval
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
testTimeoutAccept = defaultTimeoutAcceptSeconds * time.Second
|
||||
|
||||
testTimeoutReadWrite = 100 * time.Millisecond
|
||||
testTimeoutReadWrite2o3 = 66 * time.Millisecond // 2/3 of the other one
|
||||
|
||||
testTimeoutHeartbeat = 10 * time.Millisecond
|
||||
testTimeoutHeartbeat3o2 = 6 * time.Millisecond // 3/2 of the other one
|
||||
)
|
||||
|
||||
type socketTestCase struct {
|
||||
addr string
|
||||
dialer SocketDialer
|
||||
}
|
||||
|
||||
func socketTestCases(t *testing.T) []socketTestCase {
|
||||
tcpAddr := fmt.Sprintf("tcp://%s", testFreeTCPAddr(t))
|
||||
unixFilePath, err := testUnixAddr()
|
||||
require.NoError(t, err)
|
||||
unixAddr := fmt.Sprintf("unix://%s", unixFilePath)
|
||||
return []socketTestCase{
|
||||
{
|
||||
addr: tcpAddr,
|
||||
dialer: DialTCPFn(tcpAddr, testTimeoutReadWrite, ed25519.GenPrivKey()),
|
||||
},
|
||||
{
|
||||
addr: unixAddr,
|
||||
dialer: DialUnixFn(unixFilePath),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVAddress(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
// Execute the test within a closure to ensure the deferred statements
|
||||
// are called between each for loop iteration, for isolated test cases.
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(t, chainID, types.NewMockPV(), tc.addr, tc.dialer)
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
serviceAddr := serviceEndpoint.privVal.GetPubKey().Address()
|
||||
validatorAddr := validatorEndpoint.GetPubKey().Address()
|
||||
|
||||
assert.Equal(t, serviceAddr, validatorAddr)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVPubKey(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
clientKey := validatorEndpoint.GetPubKey()
|
||||
privvalPubKey := serviceEndpoint.privVal.GetPubKey()
|
||||
|
||||
assert.Equal(t, privvalPubKey, clientKey)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVProposal(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
|
||||
ts = time.Now()
|
||||
privProposal = &types.Proposal{Timestamp: ts}
|
||||
clientProposal = &types.Proposal{Timestamp: ts}
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
require.NoError(t, serviceEndpoint.privVal.SignProposal(chainID, privProposal))
|
||||
require.NoError(t, validatorEndpoint.SignProposal(chainID, clientProposal))
|
||||
|
||||
assert.Equal(t, privProposal.Signature, clientProposal.Signature)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVVote(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
|
||||
ts = time.Now()
|
||||
vType = types.PrecommitType
|
||||
want = &types.Vote{Timestamp: ts, Type: vType}
|
||||
have = &types.Vote{Timestamp: ts, Type: vType}
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want))
|
||||
require.NoError(t, validatorEndpoint.SignVote(chainID, have))
|
||||
assert.Equal(t, want.Signature, have.Signature)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVVoteResetDeadline(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
|
||||
ts = time.Now()
|
||||
vType = types.PrecommitType
|
||||
want = &types.Vote{Timestamp: ts, Type: vType}
|
||||
have = &types.Vote{Timestamp: ts, Type: vType}
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
time.Sleep(testTimeoutReadWrite2o3)
|
||||
|
||||
require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want))
|
||||
require.NoError(t, validatorEndpoint.SignVote(chainID, have))
|
||||
assert.Equal(t, want.Signature, have.Signature)
|
||||
|
||||
// This would exceed the deadline if it was not extended by the previous message
|
||||
time.Sleep(testTimeoutReadWrite2o3)
|
||||
|
||||
require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want))
|
||||
require.NoError(t, validatorEndpoint.SignVote(chainID, have))
|
||||
assert.Equal(t, want.Signature, have.Signature)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVVoteKeepalive(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
|
||||
ts = time.Now()
|
||||
vType = types.PrecommitType
|
||||
want = &types.Vote{Timestamp: ts, Type: vType}
|
||||
have = &types.Vote{Timestamp: ts, Type: vType}
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
time.Sleep(testTimeoutReadWrite * 2)
|
||||
|
||||
require.NoError(t, serviceEndpoint.privVal.SignVote(chainID, want))
|
||||
require.NoError(t, validatorEndpoint.SignVote(chainID, have))
|
||||
assert.Equal(t, want.Signature, have.Signature)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSocketPVDeadline(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
listenc = make(chan struct{})
|
||||
thisConnTimeout = 100 * time.Millisecond
|
||||
validatorEndpoint = newSignerValidatorEndpoint(log.TestingLogger(), tc.addr, thisConnTimeout)
|
||||
)
|
||||
|
||||
go func(sc *SignerValidatorEndpoint) {
|
||||
defer close(listenc)
|
||||
|
||||
// Note: the TCP connection times out at the accept() phase,
|
||||
// whereas the Unix domain sockets connection times out while
|
||||
// attempting to fetch the remote signer's public key.
|
||||
assert.True(t, IsConnTimeout(sc.Start()))
|
||||
|
||||
assert.False(t, sc.IsRunning())
|
||||
}(validatorEndpoint)
|
||||
|
||||
for {
|
||||
_, err := cmn.Connect(tc.addr)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
<-listenc
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteSignVoteErrors(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewErroringMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
|
||||
ts = time.Now()
|
||||
vType = types.PrecommitType
|
||||
vote = &types.Vote{Timestamp: ts, Type: vType}
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
err := validatorEndpoint.SignVote("", vote)
|
||||
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())
|
||||
|
||||
err = serviceEndpoint.privVal.SignVote(chainID, vote)
|
||||
require.Error(t, err)
|
||||
err = validatorEndpoint.SignVote(chainID, vote)
|
||||
require.Error(t, err)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteSignProposalErrors(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
chainID = cmn.RandStr(12)
|
||||
validatorEndpoint, serviceEndpoint = testSetupSocketPair(
|
||||
t,
|
||||
chainID,
|
||||
types.NewErroringMockPV(),
|
||||
tc.addr,
|
||||
tc.dialer)
|
||||
|
||||
ts = time.Now()
|
||||
proposal = &types.Proposal{Timestamp: ts}
|
||||
)
|
||||
defer validatorEndpoint.Stop()
|
||||
defer serviceEndpoint.Stop()
|
||||
|
||||
err := validatorEndpoint.SignProposal("", proposal)
|
||||
require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error())
|
||||
|
||||
err = serviceEndpoint.privVal.SignProposal(chainID, proposal)
|
||||
require.Error(t, err)
|
||||
|
||||
err = validatorEndpoint.SignProposal(chainID, proposal)
|
||||
require.Error(t, err)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrUnexpectedResponse(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
logger = log.TestingLogger()
|
||||
chainID = cmn.RandStr(12)
|
||||
readyCh = make(chan struct{})
|
||||
errCh = make(chan error, 1)
|
||||
|
||||
serviceEndpoint = NewSignerServiceEndpoint(
|
||||
logger,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.dialer,
|
||||
)
|
||||
|
||||
validatorEndpoint = newSignerValidatorEndpoint(
|
||||
logger,
|
||||
tc.addr,
|
||||
testTimeoutReadWrite)
|
||||
)
|
||||
|
||||
testStartEndpoint(t, readyCh, validatorEndpoint)
|
||||
defer validatorEndpoint.Stop()
|
||||
SignerServiceEndpointTimeoutReadWrite(time.Millisecond)(serviceEndpoint)
|
||||
SignerServiceEndpointConnRetries(100)(serviceEndpoint)
|
||||
// we do not want to Start() the remote signer here and instead use the connection to
|
||||
// reply with intentionally wrong replies below:
|
||||
rsConn, err := serviceEndpoint.connect()
|
||||
defer rsConn.Close()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, rsConn)
|
||||
// send over public key to get the remote signer running:
|
||||
go testReadWriteResponse(t, &PubKeyResponse{}, rsConn)
|
||||
<-readyCh
|
||||
|
||||
// Proposal:
|
||||
go func(errc chan error) {
|
||||
errc <- validatorEndpoint.SignProposal(chainID, &types.Proposal{})
|
||||
}(errCh)
|
||||
|
||||
// read request and write wrong response:
|
||||
go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn)
|
||||
err = <-errCh
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err, ErrUnexpectedResponse)
|
||||
|
||||
// Vote:
|
||||
go func(errc chan error) {
|
||||
errc <- validatorEndpoint.SignVote(chainID, &types.Vote{})
|
||||
}(errCh)
|
||||
// read request and write wrong response:
|
||||
go testReadWriteResponse(t, &SignedProposalResponse{}, rsConn)
|
||||
err = <-errCh
|
||||
require.Error(t, err)
|
||||
require.Equal(t, err, ErrUnexpectedResponse)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestRetryConnToRemoteSigner(t *testing.T) {
|
||||
for _, tc := range socketTestCases(t) {
|
||||
func() {
|
||||
var (
|
||||
logger = log.TestingLogger()
|
||||
chainID = cmn.RandStr(12)
|
||||
readyCh = make(chan struct{})
|
||||
|
||||
serviceEndpoint = NewSignerServiceEndpoint(
|
||||
logger,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.dialer,
|
||||
)
|
||||
thisConnTimeout = testTimeoutReadWrite
|
||||
validatorEndpoint = newSignerValidatorEndpoint(logger, tc.addr, thisConnTimeout)
|
||||
)
|
||||
// Ping every:
|
||||
SignerValidatorEndpointSetHeartbeat(testTimeoutHeartbeat)(validatorEndpoint)
|
||||
|
||||
SignerServiceEndpointTimeoutReadWrite(testTimeoutReadWrite)(serviceEndpoint)
|
||||
SignerServiceEndpointConnRetries(10)(serviceEndpoint)
|
||||
|
||||
testStartEndpoint(t, readyCh, validatorEndpoint)
|
||||
defer validatorEndpoint.Stop()
|
||||
require.NoError(t, serviceEndpoint.Start())
|
||||
assert.True(t, serviceEndpoint.IsRunning())
|
||||
|
||||
<-readyCh
|
||||
time.Sleep(testTimeoutHeartbeat * 2)
|
||||
|
||||
serviceEndpoint.Stop()
|
||||
rs2 := NewSignerServiceEndpoint(
|
||||
logger,
|
||||
chainID,
|
||||
types.NewMockPV(),
|
||||
tc.dialer,
|
||||
)
|
||||
// let some pings pass
|
||||
time.Sleep(testTimeoutHeartbeat3o2)
|
||||
require.NoError(t, rs2.Start())
|
||||
assert.True(t, rs2.IsRunning())
|
||||
defer rs2.Stop()
|
||||
|
||||
// give the client some time to re-establish the conn to the remote signer
|
||||
// should see sth like this in the logs:
|
||||
//
|
||||
// E[10016-01-10|17:12:46.128] Ping err="remote signer timed out"
|
||||
// I[10016-01-10|17:16:42.447] Re-created connection to remote signer impl=SocketVal
|
||||
time.Sleep(testTimeoutReadWrite * 2)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func newSignerValidatorEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerValidatorEndpoint {
    proto, address := cmn.ProtocolAndAddress(addr)

    ln, err := net.Listen(proto, address)
    logger.Info("Listening at", "proto", proto, "address", address)
    if err != nil {
        panic(err)
    }

    var listener net.Listener

    if proto == "unix" {
        unixLn := NewUnixListener(ln)
        UnixListenerTimeoutAccept(testTimeoutAccept)(unixLn)
        UnixListenerTimeoutReadWrite(timeoutReadWrite)(unixLn)
        listener = unixLn
    } else {
        tcpLn := NewTCPListener(ln, ed25519.GenPrivKey())
        TCPListenerTimeoutAccept(testTimeoutAccept)(tcpLn)
        TCPListenerTimeoutReadWrite(timeoutReadWrite)(tcpLn)
        listener = tcpLn
    }

    return NewSignerValidatorEndpoint(logger, listener)
}

func testSetupSocketPair(
    t *testing.T,
    chainID string,
    privValidator types.PrivValidator,
    addr string,
    socketDialer SocketDialer,
) (*SignerValidatorEndpoint, *SignerServiceEndpoint) {
    var (
        logger  = log.TestingLogger()
        privVal = privValidator
        readyc  = make(chan struct{})
        serviceEndpoint = NewSignerServiceEndpoint(
            logger,
            chainID,
            privVal,
            socketDialer,
        )

        thisConnTimeout   = testTimeoutReadWrite
        validatorEndpoint = newSignerValidatorEndpoint(logger, addr, thisConnTimeout)
    )

    SignerValidatorEndpointSetHeartbeat(testTimeoutHeartbeat)(validatorEndpoint)
    SignerServiceEndpointTimeoutReadWrite(testTimeoutReadWrite)(serviceEndpoint)
    SignerServiceEndpointConnRetries(1e6)(serviceEndpoint)

    testStartEndpoint(t, readyc, validatorEndpoint)

    require.NoError(t, serviceEndpoint.Start())
    assert.True(t, serviceEndpoint.IsRunning())

    <-readyc

    return validatorEndpoint, serviceEndpoint
}

func testReadWriteResponse(t *testing.T, resp RemoteSignerMsg, rsConn net.Conn) {
    _, err := readMsg(rsConn)
    require.NoError(t, err)

    err = writeMsg(rsConn, resp)
    require.NoError(t, err)
}

func testStartEndpoint(t *testing.T, readyCh chan struct{}, sc *SignerValidatorEndpoint) {
    go func(sc *SignerValidatorEndpoint) {
        require.NoError(t, sc.Start())
        assert.True(t, sc.IsRunning())

        readyCh <- struct{}{}
    }(sc)
}

// testFreeTCPAddr claims a free port so we don't block on listener being ready.
func testFreeTCPAddr(t *testing.T) string {
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    require.NoError(t, err)
    defer ln.Close()

    return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port)
}
@@ -9,8 +9,8 @@ import (
)

const (
    defaultTimeoutAcceptSeconds    = 3
    defaultTimeoutReadWriteSeconds = 3
    defaultAcceptDeadlineSeconds = 3
    defaultConnDeadlineSeconds   = 3
)

// timeoutError can be used to check if an error returned from the net package
@@ -25,16 +25,16 @@ type timeoutError interface {
// TCPListenerOption sets an optional parameter on the tcpListener.
type TCPListenerOption func(*tcpListener)

// TCPListenerTimeoutAccept sets the timeout for the listener.
// A zero time value disables the timeout.
func TCPListenerTimeoutAccept(timeout time.Duration) TCPListenerOption {
    return func(tl *tcpListener) { tl.timeoutAccept = timeout }
// TCPListenerAcceptDeadline sets the deadline for the listener.
// A zero time value disables the deadline.
func TCPListenerAcceptDeadline(deadline time.Duration) TCPListenerOption {
    return func(tl *tcpListener) { tl.acceptDeadline = deadline }
}

// TCPListenerTimeoutReadWrite sets the read and write timeout for connections
// TCPListenerConnDeadline sets the read and write deadline for connections
// from external signing processes.
func TCPListenerTimeoutReadWrite(timeout time.Duration) TCPListenerOption {
    return func(tl *tcpListener) { tl.timeoutReadWrite = timeout }
func TCPListenerConnDeadline(deadline time.Duration) TCPListenerOption {
    return func(tl *tcpListener) { tl.connDeadline = deadline }
}

// tcpListener implements net.Listener.
@@ -47,25 +47,24 @@ type tcpListener struct {

    secretConnKey ed25519.PrivKeyEd25519

    timeoutAccept    time.Duration
    timeoutReadWrite time.Duration
    acceptDeadline time.Duration
    connDeadline   time.Duration
}

// NewTCPListener returns a listener that accepts authenticated encrypted connections
// using the given secretConnKey and the default timeout values.
func NewTCPListener(ln net.Listener, secretConnKey ed25519.PrivKeyEd25519) *tcpListener {
    return &tcpListener{
        TCPListener:      ln.(*net.TCPListener),
        secretConnKey:    secretConnKey,
        timeoutAccept:    time.Second * defaultTimeoutAcceptSeconds,
        timeoutReadWrite: time.Second * defaultTimeoutReadWriteSeconds,
        TCPListener:    ln.(*net.TCPListener),
        secretConnKey:  secretConnKey,
        acceptDeadline: time.Second * defaultAcceptDeadlineSeconds,
        connDeadline:   time.Second * defaultConnDeadlineSeconds,
    }
}

// Accept implements net.Listener.
func (ln *tcpListener) Accept() (net.Conn, error) {
    deadline := time.Now().Add(ln.timeoutAccept)
    err := ln.SetDeadline(deadline)
    err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline))
    if err != nil {
        return nil, err
    }
@@ -76,7 +75,7 @@ func (ln *tcpListener) Accept() (net.Conn, error) {
    }

    // Wrap the conn in our timeout and encryption wrappers
    timeoutConn := newTimeoutConn(tc, ln.timeoutReadWrite)
    timeoutConn := newTimeoutConn(tc, ln.connDeadline)
    secretConn, err := p2pconn.MakeSecretConnection(timeoutConn, ln.secretConnKey)
    if err != nil {
        return nil, err
@@ -93,16 +92,16 @@ var _ net.Listener = (*unixListener)(nil)

type UnixListenerOption func(*unixListener)

// UnixListenerTimeoutAccept sets the timeout for the listener.
// A zero time value disables the timeout.
func UnixListenerTimeoutAccept(timeout time.Duration) UnixListenerOption {
    return func(ul *unixListener) { ul.timeoutAccept = timeout }
// UnixListenerAcceptDeadline sets the deadline for the listener.
// A zero time value disables the deadline.
func UnixListenerAcceptDeadline(deadline time.Duration) UnixListenerOption {
    return func(ul *unixListener) { ul.acceptDeadline = deadline }
}

// UnixListenerTimeoutReadWrite sets the read and write timeout for connections
// UnixListenerConnDeadline sets the read and write deadline for connections
// from external signing processes.
func UnixListenerTimeoutReadWrite(timeout time.Duration) UnixListenerOption {
    return func(ul *unixListener) { ul.timeoutReadWrite = timeout }
func UnixListenerConnDeadline(deadline time.Duration) UnixListenerOption {
    return func(ul *unixListener) { ul.connDeadline = deadline }
}

// unixListener wraps a *net.UnixListener to standardise protocol timeouts
@@ -110,24 +109,23 @@ func UnixListenerTimeoutReadWrite(timeout time.Duration) UnixListenerOption {
type unixListener struct {
    *net.UnixListener

    timeoutAccept    time.Duration
    timeoutReadWrite time.Duration
    acceptDeadline time.Duration
    connDeadline   time.Duration
}

// NewUnixListener returns a listener that accepts unencrypted connections
// using the default timeout values.
func NewUnixListener(ln net.Listener) *unixListener {
    return &unixListener{
        UnixListener:     ln.(*net.UnixListener),
        timeoutAccept:    time.Second * defaultTimeoutAcceptSeconds,
        timeoutReadWrite: time.Second * defaultTimeoutReadWriteSeconds,
        UnixListener:   ln.(*net.UnixListener),
        acceptDeadline: time.Second * defaultAcceptDeadlineSeconds,
        connDeadline:   time.Second * defaultConnDeadlineSeconds,
    }
}

// Accept implements net.Listener.
func (ln *unixListener) Accept() (net.Conn, error) {
    deadline := time.Now().Add(ln.timeoutAccept)
    err := ln.SetDeadline(deadline)
    err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline))
    if err != nil {
        return nil, err
    }
@@ -138,7 +136,7 @@ func (ln *unixListener) Accept() (net.Conn, error) {
    }

    // Wrap the conn in our timeout wrapper
    conn := newTimeoutConn(tc, ln.timeoutReadWrite)
    conn := newTimeoutConn(tc, ln.connDeadline)

    // TODO: wrap in something that authenticates
    // with a MAC - https://github.com/tendermint/tendermint/issues/3099
@@ -155,25 +153,24 @@ var _ net.Conn = (*timeoutConn)(nil)
// timeoutConn wraps a net.Conn to standardise protocol timeouts / deadline resets.
type timeoutConn struct {
    net.Conn
    timeout time.Duration

    connDeadline time.Duration
}

// newTimeoutConn returns an instance of timeoutConn.
func newTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn {
func newTimeoutConn(
    conn net.Conn,
    connDeadline time.Duration) *timeoutConn {
    return &timeoutConn{
        conn,
        timeout,
        connDeadline,
    }
}

// Read implements net.Conn.
func (c timeoutConn) Read(b []byte) (n int, err error) {
    // Reset deadline
    deadline := time.Now().Add(c.timeout)
    err = c.Conn.SetReadDeadline(deadline)
    if err != nil {
        return
    }
    c.Conn.SetReadDeadline(time.Now().Add(c.connDeadline))

    return c.Conn.Read(b)
}
@@ -181,11 +178,7 @@ func (c timeoutConn) Read(b []byte) (n int, err error) {
// Write implements net.Conn.
func (c timeoutConn) Write(b []byte) (n int, err error) {
    // Reset deadline
    deadline := time.Now().Add(c.timeout)
    err = c.Conn.SetWriteDeadline(deadline)
    if err != nil {
        return
    }
    c.Conn.SetWriteDeadline(time.Now().Add(c.connDeadline))

    return c.Conn.Write(b)
}
@@ -1,43 +0,0 @@
package privval

import (
    "net"
    "time"

    "github.com/pkg/errors"
    "github.com/tendermint/tendermint/crypto/ed25519"
    cmn "github.com/tendermint/tendermint/libs/common"
    p2pconn "github.com/tendermint/tendermint/p2p/conn"
)

// Socket errors.
var (
    ErrDialRetryMax = errors.New("dialed maximum retries")
)

// SocketDialer dials a remote address and returns a net.Conn or an error.
type SocketDialer func() (net.Conn, error)

// DialTCPFn dials the given tcp addr, using the given timeoutReadWrite and
// privKey for the authenticated encryption handshake.
func DialTCPFn(addr string, timeoutReadWrite time.Duration, privKey ed25519.PrivKeyEd25519) SocketDialer {
    return func() (net.Conn, error) {
        conn, err := cmn.Connect(addr)
        if err == nil {
            deadline := time.Now().Add(timeoutReadWrite)
            err = conn.SetDeadline(deadline)
        }
        if err == nil {
            conn, err = p2pconn.MakeSecretConnection(conn, privKey)
        }
        return conn, err
    }
}

// DialUnixFn dials the given unix socket.
func DialUnixFn(addr string) SocketDialer {
    return func() (net.Conn, error) {
        unixAddr := &net.UnixAddr{Name: addr, Net: "unix"}
        return net.DialUnix("unix", nil, unixAddr)
    }
}
@@ -1,26 +0,0 @@
package privval

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/tendermint/tendermint/crypto/ed25519"
    cmn "github.com/tendermint/tendermint/libs/common"
)

func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) {
    // Generate a networking timeout
    dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey())
    _, err := dialer()
    assert.Error(t, err)
    assert.True(t, IsConnTimeout(err))
}

func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) {
    dialer := DialTCPFn(testFreeTCPAddr(t), time.Millisecond, ed25519.GenPrivKey())
    _, err := dialer()
    assert.Error(t, err)
    err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
    assert.True(t, IsConnTimeout(err))
}
@@ -23,7 +23,7 @@ func newPrivKey() ed25519.PrivKeyEd25519 {
type listenerTestCase struct {
    description string // For test reporting purposes.
    listener    net.Listener
    dialer      SocketDialer
    dialer      Dialer
}

// testUnixAddr will attempt to obtain a platform-independent temporary file
@@ -39,23 +39,23 @@ func testUnixAddr() (string, error) {
    return addr, nil
}

func tcpListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) listenerTestCase {
func tcpListenerTestCase(t *testing.T, acceptDeadline, connectDeadline time.Duration) listenerTestCase {
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        t.Fatal(err)
    }

    tcpLn := NewTCPListener(ln, newPrivKey())
    TCPListenerTimeoutAccept(timeoutAccept)(tcpLn)
    TCPListenerTimeoutReadWrite(timeoutReadWrite)(tcpLn)
    TCPListenerAcceptDeadline(acceptDeadline)(tcpLn)
    TCPListenerConnDeadline(connectDeadline)(tcpLn)
    return listenerTestCase{
        description: "TCP",
        listener:    tcpLn,
        dialer:      DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, newPrivKey()),
        dialer:      DialTCPFn(ln.Addr().String(), testConnDeadline, newPrivKey()),
    }
}

func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) listenerTestCase {
func unixListenerTestCase(t *testing.T, acceptDeadline, connectDeadline time.Duration) listenerTestCase {
    addr, err := testUnixAddr()
    if err != nil {
        t.Fatal(err)
@@ -66,8 +66,8 @@ func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Dur
    }

    unixLn := NewUnixListener(ln)
    UnixListenerTimeoutAccept(timeoutAccept)(unixLn)
    UnixListenerTimeoutReadWrite(timeoutReadWrite)(unixLn)
    UnixListenerAcceptDeadline(acceptDeadline)(unixLn)
    UnixListenerConnDeadline(connectDeadline)(unixLn)
    return listenerTestCase{
        description: "Unix",
        listener:    unixLn,
@@ -75,14 +75,14 @@ func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Dur
    }
}

func listenerTestCases(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) []listenerTestCase {
func listenerTestCases(t *testing.T, acceptDeadline, connectDeadline time.Duration) []listenerTestCase {
    return []listenerTestCase{
        tcpListenerTestCase(t, timeoutAccept, timeoutReadWrite),
        unixListenerTestCase(t, timeoutAccept, timeoutReadWrite),
        tcpListenerTestCase(t, acceptDeadline, connectDeadline),
        unixListenerTestCase(t, acceptDeadline, connectDeadline),
    }
}

func TestListenerTimeoutAccept(t *testing.T) {
func TestListenerAcceptDeadlines(t *testing.T) {
    for _, tc := range listenerTestCases(t, time.Millisecond, time.Second) {
        _, err := tc.listener.Accept()
        opErr, ok := err.(*net.OpError)
@@ -96,9 +96,9 @@ func TestListenerTimeoutAccept(t *testing.T) {
    }
}

func TestListenerTimeoutReadWrite(t *testing.T) {
func TestListenerConnectDeadlines(t *testing.T) {
    for _, tc := range listenerTestCases(t, time.Second, time.Millisecond) {
        go func(dialer SocketDialer) {
        go func(dialer Dialer) {
            _, err := dialer()
            if err != nil {
                panic(err)
@@ -1,20 +0,0 @@
package privval

import (
    cmn "github.com/tendermint/tendermint/libs/common"
)

// IsConnTimeout returns a boolean indicating whether the error is known to
// report that a connection timeout occurred. This detects both fundamental
// network timeouts, as well as ErrConnTimeout errors.
func IsConnTimeout(err error) bool {
    if cmnErr, ok := err.(cmn.Error); ok {
        if cmnErr.Data() == ErrConnTimeout {
            return true
        }
    }
    if _, ok := err.(timeoutError); ok {
        return true
    }
    return false
}
@@ -1,14 +0,0 @@
package privval

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
    cmn "github.com/tendermint/tendermint/libs/common"
)

func TestIsConnTimeoutForNonTimeoutErrors(t *testing.T) {
    assert.False(t, IsConnTimeout(cmn.ErrorWrap(ErrDialRetryMax, "max retries exceeded")))
    assert.False(t, IsConnTimeout(fmt.Errorf("completely irrelevant error")))
}
@@ -59,20 +59,32 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
    const subscriber = "helpers"
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()
    evts := make(chan interface{}, 1)

    // register for the next event of this type
    sub, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(evtTyp))
    query := types.QueryForEvent(evtTyp)
    err := c.Subscribe(ctx, subscriber, query, evts)
    if err != nil {
        return nil, errors.Wrap(err, "failed to subscribe")
    }

    // make sure to unregister after the test is over
    defer c.UnsubscribeAll(ctx, subscriber)
    defer func() {
        // drain evts to make sure we don't block
    LOOP:
        for {
            select {
            case <-evts:
            default:
                break LOOP
            }
        }
        c.UnsubscribeAll(ctx, subscriber)
    }()

    select {
    case msg := <-sub.Out():
        return msg.Data().(types.TMEventData), nil
    case <-sub.Cancelled():
        return nil, errors.New("subscription was cancelled")
    case evt := <-evts:
        return evt.(types.TMEventData), nil
    case <-ctx.Done():
        return nil, errors.New("timed out waiting for event")
    }

@@ -249,28 +249,6 @@ func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) {

/** websocket event stuff here... **/

type subscription struct {
    out       chan tmpubsub.Message
    cancelled chan struct{}

    mtx sync.RWMutex
    err error
}

func (s *subscription) Out() <-chan tmpubsub.Message {
    return s.out
}

func (s *subscription) Cancelled() <-chan struct{} {
    return s.cancelled
}

func (s *subscription) Err() error {
    s.mtx.RLock()
    defer s.mtx.RUnlock()
    return s.err
}

type WSEvents struct {
    cmn.BaseService
    cdc *amino.Codec
@@ -278,9 +256,8 @@ type WSEvents struct {
    endpoint string
    ws       *rpcclient.WSClient

    mtx sync.RWMutex
    // query -> subscription
    subscriptions map[string]*subscription
    mtx           sync.RWMutex
    subscriptions map[string]chan<- interface{}
}

func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents {
@@ -288,7 +265,7 @@ func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents {
        cdc:           cdc,
        endpoint:      endpoint,
        remote:        remote,
        subscriptions: make(map[string]*subscription),
        subscriptions: make(map[string]chan<- interface{}),
    }

    wsEvents.BaseService = *cmn.NewBaseService(nil, "WSEvents", wsEvents)
@@ -318,29 +295,21 @@ func (w *WSEvents) OnStop() {
    }
}

func (w *WSEvents) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (types.Subscription, error) {
func (w *WSEvents) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error {
    q := query.String()

    err := w.ws.Subscribe(ctx, q)
    if err != nil {
        return nil, err
    }

    outCap := 1
    if len(outCapacity) > 0 && outCapacity[0] >= 0 {
        outCap = outCapacity[0]
        return err
    }

    w.mtx.Lock()
    // subscriber param is ignored because Tendermint will override it with
    // remote IP anyway.
    w.subscriptions[q] = &subscription{
        out:       make(chan tmpubsub.Message, outCap),
        cancelled: make(chan struct{}),
    }
    w.subscriptions[q] = out
    w.mtx.Unlock()

    return w.subscriptions[q], nil
    return nil
}

func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error {
@@ -352,12 +321,9 @@ func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber string, query tmp
    }

    w.mtx.Lock()
    sub, ok := w.subscriptions[q]
    ch, ok := w.subscriptions[q]
    if ok {
        close(sub.cancelled)
        sub.mtx.Lock()
        sub.err = errors.New("unsubscribed")
        sub.mtx.Unlock()
        close(ch)
        delete(w.subscriptions, q)
    }
    w.mtx.Unlock()
@@ -372,13 +338,10 @@ func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error
    }

    w.mtx.Lock()
    for _, sub := range w.subscriptions {
        close(sub.cancelled)
        sub.mtx.Lock()
        sub.err = errors.New("unsubscribed")
        sub.mtx.Unlock()
    for _, ch := range w.subscriptions {
        close(ch)
    }
    w.subscriptions = make(map[string]*subscription)
    w.subscriptions = make(map[string]chan<- interface{})
    w.mtx.Unlock()

    return nil
@@ -418,8 +381,8 @@ func (w *WSEvents) eventListener() {
            // NOTE: writing also happens inside mutex so we can't close a channel in
            // Unsubscribe/UnsubscribeAll.
            w.mtx.RLock()
            if sub, ok := w.subscriptions[result.Query]; ok {
                sub.out <- tmpubsub.NewMessage(result.Data, result.Tags)
            if ch, ok := w.subscriptions[result.Query]; ok {
                ch <- result.Data
            }
            w.mtx.RUnlock()
        case <-w.Quit():

|
||||
return core.TxSearch(query, prove, page, perPage)
|
||||
}
|
||||
|
||||
func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (types.Subscription, error) {
|
||||
return c.EventBus.Subscribe(ctx, subscriber, query, outCapacity...)
|
||||
func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error {
|
||||
return c.EventBus.Subscribe(ctx, subscriber, query, out)
|
||||
}
|
||||
|
||||
func (c *Local) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error {
|
||||
|
@@ -290,13 +290,9 @@ func TestUnconfirmedTxs(t *testing.T) {
    for i, c := range GetClients() {
        mc, ok := c.(client.MempoolClient)
        require.True(t, ok, "%d", i)
        res, err := mc.UnconfirmedTxs(1)
        txs, err := mc.UnconfirmedTxs(1)
        require.Nil(t, err, "%d: %+v", i, err)

        assert.Equal(t, 1, res.Count)
        assert.Equal(t, 1, res.Total)
        assert.Equal(t, mempool.TxsBytes(), res.TotalBytes)
        assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs))
        assert.Exactly(t, types.Txs{tx}, types.Txs(txs.Txs))
    }

    mempool.Flush()
@@ -315,9 +311,7 @@ func TestNumUnconfirmedTxs(t *testing.T) {
        res, err := mc.NumUnconfirmedTxs()
        require.Nil(t, err, "%d: %+v", i, err)

        assert.Equal(t, mempoolSize, res.Count)
        assert.Equal(t, mempoolSize, res.Total)
        assert.Equal(t, mempool.TxsBytes(), res.TotalBytes)
        assert.Equal(t, mempoolSize, res.N)
    }

    mempool.Flush()

@@ -101,30 +101,16 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri

    ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout)
    defer cancel()
    sub, err := eventBusFor(wsCtx).Subscribe(ctx, addr, q)
    ch := make(chan interface{})
    err = eventBusFor(wsCtx).Subscribe(ctx, addr, q, ch)
    if err != nil {
        return nil, err
    }

    go func() {
        for {
            select {
            case msg := <-sub.Out():
                resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Tags: msg.Tags()}
                wsCtx.TryWriteRPCResponse(
                    rpctypes.NewRPCSuccessResponse(
                        wsCtx.Codec(),
                        rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", wsCtx.Request.ID)),
                        resultEvent,
                    ))
            case <-sub.Cancelled():
                wsCtx.TryWriteRPCResponse(
                    rpctypes.RPCServerError(rpctypes.JSONRPCStringID(
                        fmt.Sprintf("%v#event", wsCtx.Request.ID)),
                        fmt.Errorf("subscription was cancelled (reason: %v)", sub.Err()),
                    ))
                return
            }
        for event := range ch {
            tmResult := &ctypes.ResultEvent{Query: query, Data: event.(tmtypes.TMEventData)}
            wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", wsCtx.Request.ID)), tmResult))
        }
    }()

@@ -16,13 +16,7 @@ import (
//-----------------------------------------------------------------------------
// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!)

// Returns right away, with no response. Does not wait for CheckTx nor
// DeliverTx results.
//
// Please refer to
// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
// for formatting/encoding rules.
//
// Returns right away, with no response
//
// ```shell
// curl 'localhost:26657/broadcast_tx_async?tx="123"'
@@ -67,11 +61,7 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
    return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil
}

// Returns with the response from CheckTx. Does not wait for DeliverTx result.
//
// Please refer to
// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
// for formatting/encoding rules.
// Returns with the response from CheckTx.
//
// ```shell
// curl 'localhost:26657/broadcast_tx_sync?tx="456"'
@@ -126,19 +116,12 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
    }, nil
}

// Returns with the responses from CheckTx and DeliverTx.
//
// CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout
// waiting for tx to commit.
//
// If CheckTx or DeliverTx fail, no error will be returned, but the returned result
// will contain a non-OK ABCI code.
//
// Please refer to
// https://tendermint.com/docs/tendermint-core/using-tendermint.html#formatting
// for formatting/encoding rules.
//
//
// ```shell
// curl 'localhost:26657/broadcast_tx_commit?tx="789"'
// ```
@@ -186,14 +169,26 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
    // Subscribe to tx being committed in block.
    ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout)
    defer cancel()
    deliverTxResCh := make(chan interface{}, 1)
    q := types.EventQueryTxFor(tx)
    deliverTxSub, err := eventBus.Subscribe(ctx, "mempool", q)
    err := eventBus.Subscribe(ctx, "mempool", q, deliverTxResCh)
    if err != nil {
        err = errors.Wrap(err, "failed to subscribe to tx")
        logger.Error("Error on broadcast_tx_commit", "err", err)
        return nil, err
    }
    defer eventBus.Unsubscribe(context.Background(), "mempool", q)
    defer func() {
        // drain deliverTxResCh to make sure we don't block
    LOOP:
        for {
            select {
            case <-deliverTxResCh:
            default:
                break LOOP
            }
        }
        eventBus.Unsubscribe(context.Background(), "mempool", q)
    }()

    // Broadcast tx and wait for CheckTx result
    checkTxResCh := make(chan *abci.Response, 1)
@@ -218,22 +213,17 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
    // TODO: configurable?
    var deliverTxTimeout = rpcserver.WriteTimeout / 2
    select {
    case msg := <-deliverTxSub.Out(): // The tx was included in a block.
        deliverTxRes := msg.Data().(types.EventDataTx)
    case deliverTxResMsg, ok := <-deliverTxResCh: // The tx was included in a block.
        if !ok {
            return nil, errors.New("Error on broadcastTxCommit: expected DeliverTxResult, got nil. Did the Tendermint stop?")
        }
        deliverTxRes := deliverTxResMsg.(types.EventDataTx)
        return &ctypes.ResultBroadcastTxCommit{
            CheckTx:   *checkTxRes,
            DeliverTx: deliverTxRes.Result,
            Hash:      tx.Hash(),
            Height:    deliverTxRes.Height,
        }, nil
    case <-deliverTxSub.Cancelled():
        err = errors.New("deliverTxSub was cancelled. Did the Tendermint stop?")
        logger.Error("Error on broadcastTxCommit", "err", err)
        return &ctypes.ResultBroadcastTxCommit{
            CheckTx:   *checkTxRes,
            DeliverTx: abci.ResponseDeliverTx{},
            Hash:      tx.Hash(),
        }, err
    case <-time.After(deliverTxTimeout):
        err = errors.New("Timed out waiting for tx to be included in a block")
        logger.Error("Error on broadcastTxCommit", "err", err)
@@ -265,32 +255,27 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
//
// ```json
// {
//   "result" : {
//     "txs" : [],
//     "total_bytes" : "0",
//     "n_txs" : "0",
//     "total" : "0"
//   },
//   "jsonrpc" : "2.0",
//   "id" : ""
// }
// ```
//   "error": "",
//   "result": {
//     "txs": [],
//     "n_txs": "0"
//   },
//   "id": "",
//   "jsonrpc": "2.0"
// }
//
// ### Query Parameters
//
// | Parameter | Type | Default | Required | Description                          |
// |-----------+------+---------+----------+--------------------------------------|
// | limit     | int  | 30      | false    | Maximum number of entries (max: 100) |
// ```
func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
    // reuse per_page validator
    limit = validatePerPage(limit)

    txs := mempool.ReapMaxTxs(limit)
    return &ctypes.ResultUnconfirmedTxs{
        Count:      len(txs),
        Total:      mempool.Size(),
        TotalBytes: mempool.TxsBytes(),
        Txs:        txs}, nil
    return &ctypes.ResultUnconfirmedTxs{N: len(txs), Txs: txs}, nil
}

// Get number of unconfirmed transactions.
@@ -313,19 +298,15 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
//
// ```json
// {
//   "jsonrpc" : "2.0",
//   "id" : "",
//   "result" : {
//     "n_txs" : "0",
//     "total_bytes" : "0",
//     "txs" : null,
//     "total" : "0"
//   }
//   "error": "",
//   "result": {
//     "txs": null,
//     "n_txs": "0"
//   },
//   "id": "",
//   "jsonrpc": "2.0"
// }
// ```
func NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
    return &ctypes.ResultUnconfirmedTxs{
        Count:      mempool.Size(),
        Total:      mempool.Size(),
        TotalBytes: mempool.TxsBytes()}, nil
    return &ctypes.ResultUnconfirmedTxs{N: mempool.Size()}, nil
}

@@ -16,7 +16,7 @@ import (
// place.
//
// ```shell
// curl "localhost:26657/tx?hash=0xF87370F68C82D9AC7201248ECA48CEC5F16FFEC99C461C1B2961341A2FE9C1C8"
// curl "localhost:26657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"
// ```
//
// ```go
@@ -26,8 +26,7 @@ import (
//   // handle error
// }
// defer client.Stop()
// hashBytes, err := hex.DecodeString("F87370F68C82D9AC7201248ECA48CEC5F16FFEC99C461C1B2961341A2FE9C1C8")
// tx, err := client.Tx(hashBytes, true)
// tx, err := client.Tx([]byte("2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"), true)
// ```
//
// > The above command returns JSON structured like this:

@@ -178,10 +178,8 @@ type ResultTxSearch struct {

// List of mempool txs
type ResultUnconfirmedTxs struct {
    Count      int        `json:"n_txs"`
    Total      int        `json:"total"`
    TotalBytes int64      `json:"total_bytes"`
    Txs        []types.Tx `json:"txs"`
    N   int        `json:"n_txs"`
    Txs []types.Tx `json:"txs"`
}

// Info abci msg
@@ -207,5 +205,4 @@ type (
type ResultEvent struct {
    Query string            `json:"query"`
    Data  types.TMEventData `json:"data"`
    Tags  map[string]string `json:"tags"`
}

@@ -526,7 +526,6 @@ func (wsc *wsConnection) OnStart() error {
func (wsc *wsConnection) OnStop() {
    // Both read and write loops close the websocket connection when they exit their loops.
    // The writeChan is never closed, to allow WriteRPCResponse() to fail.

    if wsc.eventSub != nil {
        wsc.eventSub.UnsubscribeAll(context.TODO(), wsc.remoteAddr)
    }

@@ -24,19 +24,17 @@ type Result struct {
}

func main() {
    var (
        mux    = http.NewServeMux()
        cdc    = amino.NewCodec()
        logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
    )

    // Stop upon receiving SIGTERM or CTRL-C.
    cmn.TrapSignal(logger, func() {})

    mux := http.NewServeMux()
    cdc := amino.NewCodec()
    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
    rpcserver.RegisterRPCFuncs(mux, routes, cdc, logger)
    listener, err := rpcserver.Listen("0.0.0.0:8008", rpcserver.Config{})
    if err != nil {
        cmn.Exit(err.Error())
    }
    rpcserver.StartHTTPServer(listener, mux, logger)
    go rpcserver.StartHTTPServer(listener, mux, logger)
    // Wait forever
    cmn.TrapSignal(func() {
    })

}

@@ -12,7 +12,6 @@ import (
    amino "github.com/tendermint/go-amino"

    tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
    tmtypes "github.com/tendermint/tendermint/types"
)

// a wrapper to emulate a sum type: jsonrpcid = string | int
@@ -245,19 +244,19 @@ type WSRPCConnection interface {
    Codec() *amino.Codec
}

// EventSubscriber mirrors tendermint/tendermint/types.EventBusSubscriber
type EventSubscriber interface {
    Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error
    Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error
    UnsubscribeAll(ctx context.Context, subscriber string) error
}

// websocket-only RPCFuncs take this as the first parameter.
type WSRPCContext struct {
    Request RPCRequest
    WSRPCConnection
}

// EventSubscriber mirrors tendermint/tendermint/types.EventBusSubscriber
type EventSubscriber interface {
    Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (tmtypes.Subscription, error)
    Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error
    UnsubscribeAll(ctx context.Context, subscriber string) error
}

//----------------------------------------
// SOCKETS
//

65  scripts/release_management/README.md  Normal file
@@ -0,0 +1,65 @@
# Release management scripts

## Overview
The scripts in this folder are used for release management in CircleCI. Although the scripts are fully configurable through input parameters, their defaults are set up to accommodate CircleCI execution.

# Build scripts
These scripts help during the build process. They prepare the release files.

## bump-semver.py
Bumps the semantic version of the input `--version`. Versions are expected in vMAJOR.MINOR.PATCH format or vMAJOR.MINOR format.

In vMAJOR.MINOR format, the result will be patch version 0 of that version, for example `v1.2 -> v1.2.0`.

In vMAJOR.MINOR.PATCH format, the result will be a bumped PATCH version, for example `v1.2.3 -> v1.2.4`.

If the PATCH number contains letters, it is considered a development version, in which case the result is the non-development version of that number.
The patch number will not be bumped; only the "-dev" or similar additional text will be removed. For example: `v1.2.6-rc1 -> v1.2.6`.

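As a rough illustration, here is a condensed Python sketch of that bump rule (it mirrors the logic of `bump-semver.py`, shown in full later in this diff; `bump` is an illustrative helper, not part of the script):

```python
import re

def bump(version):
    # vMAJOR.MINOR -> patch 0; vMAJOR.MINOR.PATCH -> PATCH+1;
    # vMAJOR.MINOR.PATCH-dev (or similar) -> keep PATCH, drop the suffix.
    m = re.match(r'(v[0-9]+\.[0-9]+)(?:\.(.+))?$', version)
    prefix, patch = m.group(1), m.group(2)
    if patch is None:
        return prefix + ".0"                              # v1.2       -> v1.2.0
    if re.match(r'[0-9]+$', patch) is None:
        digits = re.match(r'([0-9]+)', patch)             # v1.2.6-rc1 -> v1.2.6
        return "{0}.{1}".format(prefix, int(digits.group(1)))
    return "{0}.{1}".format(prefix, int(patch) + 1)       # v1.2.3     -> v1.2.4
```
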
## zip-file.py
Specialized ZIP command for release management. Special features:
1. Uses Python ZIP libraries, so the `zip` command does not need to be installed.
1. Can only zip one file.
1. Optionally gets file version, Go OS and architecture.
1. By default, all inputs and outputs are formatted exactly how CircleCI needs them.

By default, the command will try to ZIP the file at `build/tendermint_${GOOS}_${GOARCH}`.
This can be changed with the `--file` input parameter.

By default, the command will output the ZIP file to `build/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`.
This can be changed with the `--destination` (folder), `--version`, `--goos` and `--goarch` input parameters respectively.

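As an illustration of those defaults (the environment values here are examples only, not taken from any CI run):

```python
# Example values, assumed for illustration:
env = {'GOOS': 'linux', 'GOARCH': 'amd64', 'CIRCLE_TAG': 'v0.31.0'}

# Default input file:  build/tendermint_linux_amd64
src = "build/tendermint_{0}_{1}".format(env['GOOS'], env['GOARCH'])

# Default output file: build/tendermint_v0.31.0_linux_amd64.zip
dst = "build/tendermint_{0}_{1}_{2}.zip".format(env['CIRCLE_TAG'], env['GOOS'], env['GOARCH'])
```
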
## sha-files.py
Specialized `shasum` command for release management. Special features:
1. Reads all ZIP files in the given folder.
1. By default, all inputs and outputs are formatted exactly how CircleCI needs them.

By default, the command will look up all ZIP files in the `build/` folder.

By default, the command will output results into the `build/SHA256SUMS` file.

# GitHub management
Uploading build results to GitHub requires at least these steps:
1. Create a new release on GitHub with its content
2. Upload all binaries to the release
3. Publish the release
The scripts below help with these steps; a hypothetical end-to-end sequence is sketched after this list.

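The sequence below is illustrative only (the flags are the ones documented for each script; the version and file name are example values, and `GITHUB_USERNAME`/`GITHUB_TOKEN` must be set in the environment):

```python
import subprocess

# 1. Create a draft release; github-draft.py prints the release ID.
release_id = subprocess.check_output(
    ["python", "scripts/release_management/github-draft.py",
     "--version", "v1.2.3"]).strip()

# 2. Upload a built artifact to the draft release (example file name).
subprocess.check_call(
    ["python", "scripts/release_management/github-upload.py",
     "--id", release_id,
     "--file", "build/tendermint_v1.2.3_linux_amd64.zip"])

# 3. Publish the draft.
subprocess.check_call(
    ["python", "scripts/release_management/github-publish.py",
     "--id", release_id])
```
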
## github-draft.py
Creates a GitHub release and fills the content with the CHANGELOG.md link. The version number can be changed by the `--version` parameter.

By default, the command will use the tendermint/tendermint organization/repo, which can be changed using the `--org` and `--repo` parameters.

By default, the command will get the version number from the `${CIRCLE_TAG}` variable.

Returns the GitHub release ID.

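For reference, the request body sent when creating the draft has this shape (it mirrors `create_draft()` in `github-draft.py`, shown in full later in this diff; the values are illustrative):

```python
import json

# Illustrative inputs; the script derives these from --org/--repo/--branch/--version.
org, repo, branch, version = "tendermint", "tendermint", "v0.31", "v0.31.0"

draft = {
    'tag_name': version,
    'target_commitish': branch,
    'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version),
    'body': '<a href=https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}>'
            'https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}</a>'.format(
                org, repo, branch, version.replace('.', '')),
    'draft': True,
    'prerelease': False,
}
print(json.dumps(draft))
```
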
## github-upload.py
Uploads a file to a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter.

By default, the command will upload the file `/tmp/workspace/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. This can be changed by the `--file` input parameter.

## github-publish.py
Publishes a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter.
37  scripts/release_management/bump-semver.py  Executable file
@@ -0,0 +1,37 @@
#!/usr/bin/env python

# Bump the release number of a semantic version number and print it. --version is required.
# Version is
# - vA.B.C, in which case vA.B.C+1 will be returned
# - vA.B.C-devorwhatnot in which case vA.B.C will be returned
# - vA.B in which case vA.B.0 will be returned

import re
import argparse


def semver(ver):
    if re.match('v[0-9]+\.[0-9]+',ver) is None:
        ver="v0.0"
        #raise argparse.ArgumentTypeError('--version must be a semantic version number with major, minor and patch numbers')
    return ver


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver)
    args = parser.parse_args()

    found = re.match('(v[0-9]+\.[0-9]+)(\.(.+))?', args.version)
    majorminorprefix = found.group(1)
    patch = found.group(3)
    if patch is None:
        patch = "0-new"

    if re.match('[0-9]+$',patch) is None:
        patchfound = re.match('([0-9]+)',patch)
        patch = int(patchfound.group(1))
    else:
        patch = int(patch) + 1

    print("{0}.{1}".format(majorminorprefix, patch))
61  scripts/release_management/github-draft.py  Executable file
@@ -0,0 +1,61 @@
#!/usr/bin/env python

# Create a draft release on GitHub. By default in the tendermint/tendermint repo.
# Optimized for CircleCI

import argparse
import httplib
import json
import os
from base64 import b64encode

def request(org, repo, data):
    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")
    headers = {
        'User-Agent': 'tenderbot',
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': 'Basic %s' % user_and_pass
    }

    conn = httplib.HTTPSConnection('api.github.com', timeout=5)
    conn.request('POST', '/repos/{0}/{1}/releases'.format(org,repo), data, headers)
    response = conn.getresponse()
    if response.status < 200 or response.status > 299:
        print("{0}: {1}".format(response.status, response.reason))
        conn.close()
        raise IOError(response.reason)
    responsedata = response.read()
    conn.close()
    return json.loads(responsedata)


def create_draft(org,repo,branch,version):
    draft = {
        'tag_name': version,
        'target_commitish': '{0}'.format(branch),
        'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version),
        'body': '<a href=https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}>https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}</a>'.format(org,repo,branch,version.replace('.','')),
        'draft': True,
        'prerelease': False
    }
    data=json.dumps(draft)
    return request(org, repo, data)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--org", default="tendermint", help="GitHub organization")
    parser.add_argument("--repo", default="tendermint", help="GitHub repository")
    parser.add_argument("--branch", default=os.environ.get('CIRCLE_BRANCH'), help="Branch to build from, e.g.: v1.0")
    parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0")
    args = parser.parse_args()

    if not os.environ.has_key('GITHUB_USERNAME'):
        raise parser.error('environment variable GITHUB_USERNAME is required')

    if not os.environ.has_key('GITHUB_TOKEN'):
        raise parser.error('environment variable GITHUB_TOKEN is required')

    release = create_draft(args.org,args.repo,args.branch,args.version)

    print(release["id"])
52  scripts/release_management/github-openpr.py  Executable file
@@ -0,0 +1,52 @@
#!/usr/bin/env python

# Open a PR against the develop branch. --branch required.
# Optimized for CircleCI

import json
import os
import argparse
import httplib
from base64 import b64encode


def request(org, repo, data):
    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")
    headers = {
        'User-Agent': 'tenderbot',
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': 'Basic %s' % user_and_pass
    }

    conn = httplib.HTTPSConnection('api.github.com', timeout=5)
    conn.request('POST', '/repos/{0}/{1}/pulls'.format(org,repo), data, headers)
    response = conn.getresponse()
    if response.status < 200 or response.status > 299:
        print(response)
        conn.close()
        raise IOError(response.reason)
    responsedata = response.read()
    conn.close()
    return json.loads(responsedata)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--org", default="tendermint", help="GitHub organization. Defaults to tendermint.")
    parser.add_argument("--repo", default="tendermint", help="GitHub repository. Defaults to tendermint.")
    parser.add_argument("--head", help="The name of the branch where your changes are implemented.", required=True)
    parser.add_argument("--base", help="The name of the branch you want the changes pulled into.", required=True)
    parser.add_argument("--title", default="Security release {0}".format(os.environ.get('CIRCLE_TAG')), help="The title of the pull request.")
    args = parser.parse_args()

    if not os.environ.has_key('GITHUB_USERNAME'):
        raise parser.error('GITHUB_USERNAME not set.')

    if not os.environ.has_key('GITHUB_TOKEN'):
        raise parser.error('GITHUB_TOKEN not set.')

    if os.environ.get('CIRCLE_TAG') is None:
        raise parser.error('CIRCLE_TAG not set.')

    result = request(args.org, args.repo, data=json.dumps({'title':"{0}".format(args.title),'head':"{0}".format(args.head),'base':"{0}".format(args.base),'body':"<Please fill in details.>"}))
    print(result['html_url'])
28  scripts/release_management/github-public-newbranch.bash  Normal file
@@ -0,0 +1,28 @@
#!/bin/sh

# github-public-newbranch.bash - create public branch from the security repository

set -euo pipefail

# Create new branch
BRANCH="${CIRCLE_TAG:-v0.0.0}-security-`date -u +%Y%m%d%H%M%S`"
# Check if the patch release exists already as a branch
if [ -n "`git branch | grep '${BRANCH}'`" ]; then
  echo "WARNING: Branch ${BRANCH} already exists."
else
  echo "Creating branch ${BRANCH}."
  git branch "${BRANCH}"
fi

# ... and check it out
git checkout "${BRANCH}"

# Add entry to public repository
git remote add tendermint-origin git@github.com:tendermint/tendermint.git

# Push branch and tag to public repository
git push tendermint-origin
git push tendermint-origin --tags

# Create a PR from the public branch to the assumed release branch in public (release branch has to exist)
python -u scripts/release_management/github-openpr.py --head "${BRANCH}" --base "${BRANCH:%.*}"
53  scripts/release_management/github-publish.py  Executable file
@@ -0,0 +1,53 @@
#!/usr/bin/env python

# Publish an existing GitHub draft release. --id required.
# Optimized for CircleCI

import json
import os
import argparse
import httplib
from base64 import b64encode


def request(org, repo, id, data):
    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")
    headers = {
        'User-Agent': 'tenderbot',
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': 'Basic %s' % user_and_pass
    }

    conn = httplib.HTTPSConnection('api.github.com', timeout=5)
    conn.request('POST', '/repos/{0}/{1}/releases/{2}'.format(org,repo,id), data, headers)
    response = conn.getresponse()
    if response.status < 200 or response.status > 299:
        print(response)
        conn.close()
        raise IOError(response.reason)
    responsedata = response.read()
    conn.close()
    return json.loads(responsedata)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--org", default="tendermint", help="GitHub organization")
    parser.add_argument("--repo", default="tendermint", help="GitHub repository")
    parser.add_argument("--id", help="GitHub release ID", required=True, type=int)
    parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for the release, e.g.: v1.0.0")
    args = parser.parse_args()

    if not os.environ.has_key('GITHUB_USERNAME'):
        raise parser.error('GITHUB_USERNAME not set.')

    if not os.environ.has_key('GITHUB_TOKEN'):
        raise parser.error('GITHUB_TOKEN not set.')

    try:
        result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}".format(args.version)}))
    except IOError as e:
        print(e)
        result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}-autorelease".format(args.version)}))

    print(result['name'])
68  scripts/release_management/github-upload.py  Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env python

# Upload a file to a GitHub draft release. --id and --file are required.
# Optimized for CircleCI

import json
import os
import re
import argparse
import mimetypes
import httplib
from base64 import b64encode


def request(baseurl, path, mimetype, mimeencoding, data):
    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")

    headers = {
        'User-Agent': 'tenderbot',
        'Accept': 'application/vnd.github.v3.raw+json',
        'Authorization': 'Basic %s' % user_and_pass,
        'Content-Type': mimetype,
        'Content-Encoding': mimeencoding
    }

    conn = httplib.HTTPSConnection(baseurl, timeout=5)
    conn.request('POST', path, data, headers)
    response = conn.getresponse()
    if response.status < 200 or response.status > 299:
        print(response)
        conn.close()
        raise IOError(response.reason)
    responsedata = response.read()
    conn.close()
    return json.loads(responsedata)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--id", help="GitHub release ID", required=True, type=int)
    parser.add_argument("--file", default="/tmp/workspace/tendermint_{0}_{1}_{2}.zip".format(os.environ.get('CIRCLE_TAG'),os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to upload")
    parser.add_argument("--return-id-only", help="Return only the release ID after upload to GitHub.", action='store_true')
    args = parser.parse_args()

    if not os.environ.has_key('GITHUB_USERNAME'):
        raise parser.error('GITHUB_USERNAME not set.')

    if not os.environ.has_key('GITHUB_TOKEN'):
        raise parser.error('GITHUB_TOKEN not set.')

    mimetypes.init()
    filename = os.path.basename(args.file)
    mimetype,mimeencoding = mimetypes.guess_type(filename, strict=False)
    if mimetype is None:
        mimetype = 'application/zip'
    if mimeencoding is None:
        mimeencoding = 'utf8'

    with open(args.file,'rb') as f:
        asset = f.read()

    result = request('uploads.github.com', '/repos/tendermint/tendermint/releases/{0}/assets?name={1}'.format(args.id, filename), mimetype, mimeencoding, asset)

    if args.return_id_only:
        print(result['id'])
    else:
        print(result['browser_download_url'])
35  scripts/release_management/sha-files.py  Executable file
@@ -0,0 +1,35 @@
#!/usr/bin/env python

# Create SHA256 summaries from all ZIP files in a folder
# Optimized for CircleCI

import re
import os
import argparse
import zipfile
import hashlib


BLOCKSIZE = 65536


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--folder", default="/tmp/workspace", help="Folder to look for, for ZIP files")
    parser.add_argument("--shafile", default="/tmp/workspace/SHA256SUMS", help="SHA256 summaries File")
    args = parser.parse_args()

    for filename in os.listdir(args.folder):
        if re.search('\.zip$',filename) is None:
            continue
        if not os.path.isfile(os.path.join(args.folder, filename)):
            continue
        with open(args.shafile,'a+') as shafile:
            hasher = hashlib.sha256()
            with open(os.path.join(args.folder, filename),'r') as f:
                buf = f.read(BLOCKSIZE)
                while len(buf) > 0:
                    hasher.update(buf)
                    buf = f.read(BLOCKSIZE)
            shafile.write("{0} {1}\n".format(hasher.hexdigest(),filename))
44  scripts/release_management/zip-file.py  Executable file
@@ -0,0 +1,44 @@
#!/usr/bin/env python

# ZIP one file as "tendermint" into a ZIP like tendermint_VERSION_OS_ARCH.zip
# Use environment variables CIRCLE_TAG, GOOS and GOARCH for easy input parameters.
# Optimized for CircleCI

import os
import argparse
import zipfile
import hashlib


BLOCKSIZE = 65536


def zip_asset(file,destination,arcname,version,goos,goarch):
    filename = os.path.basename(file)
    output = "{0}/{1}_{2}_{3}_{4}.zip".format(destination,arcname,version,goos,goarch)

    with zipfile.ZipFile(output,'w') as f:
        f.write(filename=file,arcname=arcname)
        f.comment=filename
    return output


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", default="build/tendermint_{0}_{1}".format(os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to zip")
    parser.add_argument("--destination", default="build", help="Destination folder for files")
    parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0")
    parser.add_argument("--goos", default=os.environ.get('GOOS'), help="GOOS parameter")
    parser.add_argument("--goarch", default=os.environ.get('GOARCH'), help="GOARCH parameter")
    args = parser.parse_args()

    if args.version is None:
        raise parser.error("argument --version is required")
    if args.goos is None:
        raise parser.error("argument --goos is required")
    if args.goarch is None:
        raise parser.error("argument --goarch is required")

    file = zip_asset(args.file,args.destination,"tendermint",args.version,args.goos,args.goarch)
    print(file)
@@ -317,7 +317,8 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
    defer eventBus.Stop()
    blockExec.SetEventBus(eventBus)

    updatesSub, err := eventBus.Subscribe(context.Background(), "TestEndBlockValidatorUpdates", types.EventQueryValidatorSetUpdates)
    updatesCh := make(chan interface{}, 1)
    err = eventBus.Subscribe(context.Background(), "TestEndBlockValidatorUpdates", types.EventQueryValidatorSetUpdates, updatesCh)
    require.NoError(t, err)

    block := makeBlock(state, 1)
@@ -341,15 +342,13 @@ func TestEndBlockValidatorUpdates(t *testing.T) {

    // test we threw an event
    select {
    case msg := <-updatesSub.Out():
        event, ok := msg.Data().(types.EventDataValidatorSetUpdates)
        require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data())
    case e := <-updatesCh:
        event, ok := e.(types.EventDataValidatorSetUpdates)
        require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", e)
        if assert.NotEmpty(t, event.ValidatorUpdates) {
            assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey)
            assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower)
        }
    case <-updatesSub.Cancelled():
        t.Fatalf("updatesSub was cancelled (reason: %v)", updatesSub.Err())
    case <-time.After(1 * time.Second):
        t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.")
    }

Some files were not shown because too many files have changed in this diff.