Mirror of https://github.com/fluencelabs/tendermint (synced 2025-06-09 03:21:19 +00:00)

Commit a7ecdd10de: "Merge from panic branch"
Parent commit: 7196e5ad8e
@@ -20,6 +20,7 @@ func SignBytes(chainID string, o Signable) []byte {
    buf, n, err := new(bytes.Buffer), new(int64), new(error)
    o.WriteSignBytes(chainID, buf, n, err)
    if *err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(err)
    }
    return buf.Bytes()
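The hunk above relies on the codebase's write-with-accumulated-error idiom: writers thread a shared *int64 byte count and *error through every call, and the caller panics if the error ever gets set, since writing into an in-memory buffer is not expected to fail. A minimal self-contained sketch of that idiom follows; Heartbeat and its WriteSignBytes body are invented stand-ins for a real Signable, not the actual tendermint types.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

// Simplified stand-in for the Signable interface in the diff above.
type Signable interface {
    WriteSignBytes(chainID string, w io.Writer, n *int64, err *error)
}

type Heartbeat struct{ Height int64 }

// Writes the sign bytes, accumulating the byte count in *n and the first error in *err.
func (h Heartbeat) WriteSignBytes(chainID string, w io.Writer, n *int64, err *error) {
    if *err != nil {
        return // a previous write already failed
    }
    if werr := binary.Write(w, binary.BigEndian, h.Height); werr != nil {
        *err = werr
        return
    }
    *n += 8
    m, werr := w.Write([]byte(chainID))
    *n += int64(m)
    if werr != nil {
        *err = werr
    }
}

// SignBytes mirrors the pattern in the hunk: any accumulated error is treated as fatal.
func SignBytes(chainID string, o Signable) []byte {
    buf, n, err := new(bytes.Buffer), new(int64), new(error)
    o.WriteSignBytes(chainID, buf, n, err)
    if *err != nil {
        panic(*err) // writing to an in-memory buffer should never fail
    }
    return buf.Bytes()
}

func main() {
    fmt.Printf("%X\n", SignBytes("test-chain", Heartbeat{Height: 7}))
}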
@@ -35,10 +35,11 @@ func (pubKey PubKeyEd25519) IsNil() bool { return false }
// TODO: Or should this just be BinaryRipemd160(key)? (The difference is the TypeByte.)
func (pubKey PubKeyEd25519) Address() []byte { return binary.BinaryRipemd160(pubKey) }

// TODO: Consider returning a reason for failure, or logging a runtime type mismatch.
func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool {
    sig, ok := sig_.(SignatureEd25519)
    if !ok {
        panic("PubKeyEd25519 expects an SignatureEd25519 signature")
        return false
    }
    pubKeyBytes := new([32]byte)
    copy(pubKeyBytes[:], pubKey)
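VerifyBytes above first type-asserts the Signature interface to the concrete SignatureEd25519 before verifying. A rough standalone sketch of that shape using the standard library's crypto/ed25519 (the real code predates that package and uses its own key and signature types); here a mismatched signature type is simply reported as a failed verification rather than a panic.

package main

import (
    "crypto/ed25519"
    "fmt"
)

// Simplified stand-ins for the Signature interface and its Ed25519 implementation.
type Signature interface{}

type SignatureEd25519 []byte
type PubKeyEd25519 []byte

// VerifyBytes type-asserts the concrete signature type before verifying.
// Returning false (rather than panicking) treats a mismatched type as a
// plain verification failure.
func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool {
    sig, ok := sig_.(SignatureEd25519)
    if !ok {
        return false
    }
    if len(pubKey) != ed25519.PublicKeySize {
        return false
    }
    return ed25519.Verify(ed25519.PublicKey(pubKey), msg, []byte(sig))
}

func main() {
    pub, priv, _ := ed25519.GenerateKey(nil)
    msg := []byte("hello")
    sig := SignatureEd25519(ed25519.Sign(priv, msg))
    fmt.Println(PubKeyEd25519(pub).VerifyBytes(msg, sig))                 // true
    fmt.Println(PubKeyEd25519(pub).VerifyBytes(msg, "not an ed25519 sig")) // false
}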
@@ -2,7 +2,9 @@ package binary

import (
    "bytes"
    "errors"
    "fmt"
    . "github.com/tendermint/tendermint/common"
    "io"
    "reflect"
    "time"

@@ -38,6 +40,7 @@ const (
func BasicCodecEncoder(o interface{}, w io.Writer, n *int64, err *error) {
    switch o := o.(type) {
    case nil:
        // SANITY CHECK
        panic("nil type unsupported")
    case byte:
        WriteByte(typeByte, w, n, err)

@@ -82,12 +85,16 @@ func BasicCodecEncoder(o interface{}, w io.Writer, n *int64, err *error) {
        WriteByte(typeTime, w, n, err)
        WriteTime(o, w, n, err)
    default:
        // SANITY CHECK
        panic(fmt.Sprintf("Unsupported type: %v", reflect.TypeOf(o)))
    }
}

func BasicCodecDecoder(r io.Reader, n *int64, err *error) (o interface{}) {
    type_ := ReadByte(r, n, err)
    if *err != nil {
        return
    }
    switch type_ {
    case typeByte:
        o = ReadByte(r, n, err)

@@ -118,15 +125,12 @@ func BasicCodecDecoder(r io.Reader, n *int64, err *error) (o interface{}) {
    case typeTime:
        o = ReadTime(r, n, err)
    default:
        if *err != nil {
            panic(*err)
        } else {
            panic(fmt.Sprintf("Unsupported type byte: %X", type_))
        }
        *err = errors.New(Fmt("Unsupported type byte: %X", type_))
    }
    return o
    return
}

// Contract: Caller must ensure that types match.
func BasicCodecComparator(o1 interface{}, o2 interface{}) int {
    switch o1.(type) {
    case byte:

@@ -157,8 +161,10 @@ func BasicCodecComparator(o1 interface{}, o2 interface{}) int {
    case time.Time:
        return int(o1.(time.Time).UnixNano() - o2.(time.Time).UnixNano())
    default:
        panic(fmt.Sprintf("Unsupported type: %v", reflect.TypeOf(o1)))
        // SANITY CHECK
        panic(Fmt("Unsupported type: %v", reflect.TypeOf(o1)))
    }
    return 0
}

var BasicCodec = Codec{
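The decoder change above turns an unknown type byte, which can arrive from untrusted wire data, into an error reported through *err instead of a panic. A small self-contained sketch of that behavior, assuming a made-up two-entry type table rather than the codec's real one:

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
)

const (
    typeByte  = byte(0x01)
    typeInt64 = byte(0x02) // illustrative subset of a codec's type bytes
)

// decodeBasic mirrors the decoder change above: an unknown type byte is
// reported through *err instead of panicking, since it can come from
// untrusted wire data.
func decodeBasic(r io.Reader, n *int64, err *error) (o interface{}) {
    var b [1]byte
    if _, rerr := io.ReadFull(r, b[:]); rerr != nil {
        *err = rerr
        return
    }
    *n += 1
    switch b[0] {
    case typeByte:
        var v [1]byte
        if _, rerr := io.ReadFull(r, v[:]); rerr != nil {
            *err = rerr
            return
        }
        *n += 1
        o = v[0]
    default:
        *err = errors.New(fmt.Sprintf("Unsupported type byte: %X", b[0]))
    }
    return
}

func main() {
    n, err := new(int64), new(error)
    o := decodeBasic(bytes.NewReader([]byte{typeByte, 0x7F}), n, err)
    fmt.Println(o, *n, *err) // 127 2 <nil>

    *n, *err = 0, nil
    decodeBasic(bytes.NewReader([]byte{0xEE}), n, err)
    fmt.Println(*err) // Unsupported type byte: EE
}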
@@ -70,6 +70,7 @@ func (info StructFieldInfo) unpack() (int, reflect.Type, Options) {
func GetTypeFromStructDeclaration(o interface{}) reflect.Type {
    rt := reflect.TypeOf(o)
    if rt.NumField() != 1 {
        // SANITY CHECK
        panic("Unexpected number of fields in struct-wrapped declaration of type")
    }
    return rt.Field(0).Type

@@ -78,6 +79,7 @@ func GetTypeFromStructDeclaration(o interface{}) reflect.Type {
func SetByteForType(typeByte byte, rt reflect.Type) {
    typeInfo := GetTypeInfo(rt)
    if typeInfo.Byte != 0x00 && typeInfo.Byte != typeByte {
        // SANITY CHECK
        panic(Fmt("Type %v already registered with type byte %X", rt, typeByte))
    }
    typeInfo.Byte = typeByte

@@ -122,6 +124,7 @@ type ConcreteType struct {
func RegisterInterface(o interface{}, ctypes ...ConcreteType) *TypeInfo {
    it := GetTypeFromStructDeclaration(o)
    if it.Kind() != reflect.Interface {
        // SANITY CHECK
        panic("RegisterInterface expects an interface")
    }
    toType := make(map[byte]reflect.Type, 0)

@@ -131,9 +134,11 @@ func RegisterInterface(o interface{}, ctypes ...ConcreteType) *TypeInfo {
        typeByte := ctype.Byte
        SetByteForType(typeByte, crt)
        if typeByte == 0x00 {
            // SANITY CHECK
            panic(Fmt("Byte of 0x00 is reserved for nil (%v)", ctype))
        }
        if toType[typeByte] != nil {
            // SANITY CHECK
            panic(Fmt("Duplicate Byte for type %v and %v", ctype, toType[typeByte]))
        }
        toType[typeByte] = crt

@@ -177,6 +182,8 @@ func MakeTypeInfo(rt reflect.Type) *TypeInfo {
    return info
}

// Contract: Caller must ensure that rt is supported
// (e.g. is recursively composed of supported native types, and structs and slices.)
func readReflectBinary(rv reflect.Value, rt reflect.Type, opts Options, r io.Reader, n *int64, err *error) {

    // Get typeInfo

@@ -360,6 +367,7 @@ func readReflectBinary(rv reflect.Value, rt reflect.Type, opts Options, r io.Rea
        rv.SetBool(num > 0)

    default:
        // SANITY CHECK
        panic(Fmt("Unknown field type %v", rt.Kind()))
    }
}

@@ -504,6 +512,7 @@ func writeReflectBinary(rv reflect.Value, rt reflect.Type, opts Options, w io.Wr
        }

    default:
        // SANITY CHECK
        panic(Fmt("Unknown field type %v", rt.Kind()))
    }
}

@@ -526,6 +535,8 @@ func readByteJSON(o interface{}) (typeByte byte, rest interface{}, err error) {
    return
}

// Contract: Caller must ensure that rt is supported
// (e.g. is recursively composed of supported native types, and structs and slices.)
func readReflectJSON(rv reflect.Value, rt reflect.Type, o interface{}, err *error) {

    // Get typeInfo

@@ -696,6 +707,7 @@ func readReflectJSON(rv reflect.Value, rt reflect.Type, o interface{}, err *erro
        rv.SetBool(bl)

    default:
        // SANITY CHECK
        panic(Fmt("Unknown field type %v", rt.Kind()))
    }
}

@@ -821,6 +833,7 @@ func writeReflectJSON(rv reflect.Value, rt reflect.Type, w io.Writer, n *int64,
        WriteTo(jsonBytes, w, n, err)

    default:
        // SANITY CHECK
        panic(Fmt("Unknown field type %v", rt.Kind()))
    }
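RegisterInterface above panics when a concrete type claims the reserved 0x00 byte or a byte already taken by another type. A cut-down sketch of that registry check; the registry type, SendTx, and BondTx here are illustrative stand-ins, not the real TypeInfo machinery:

package main

import (
    "fmt"
    "reflect"
)

// A cut-down sketch of the interface/concrete-type registry the hunks above
// guard with sanity-check panics: byte 0x00 is reserved for nil, and a type
// byte may only be claimed once.
type registry struct {
    toType map[byte]reflect.Type
}

func (reg *registry) register(typeByte byte, example interface{}) {
    rt := reflect.TypeOf(example)
    if typeByte == 0x00 {
        panic(fmt.Sprintf("Byte of 0x00 is reserved for nil (%v)", rt))
    }
    if existing, ok := reg.toType[typeByte]; ok {
        panic(fmt.Sprintf("Duplicate Byte for type %v and %v", rt, existing))
    }
    reg.toType[typeByte] = rt
}

type SendTx struct{}
type BondTx struct{}

func main() {
    reg := &registry{toType: make(map[byte]reflect.Type)}
    reg.register(0x01, SendTx{})
    reg.register(0x02, BondTx{})
    fmt.Println(reg.toType[0x01], reg.toType[0x02]) // main.SendTx main.BondTx
    // reg.register(0x01, BondTx{}) would panic: duplicate type byte
}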
@@ -6,6 +6,8 @@ import (
    "github.com/tendermint/tendermint/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160"
)

// THESE PANICS ARE SANITY CHECKS

func BinaryBytes(o interface{}) []byte {
    w, n, err := new(bytes.Buffer), new(int64), new(error)
    WriteBinary(o, w, n, err)
@@ -136,9 +136,11 @@ func (pool *BlockPool) PopRequest() {
    pool.requestsMtx.Lock() // Lock
    defer pool.requestsMtx.Unlock()

    // SANITY CHECK
    if r := pool.requests[pool.height]; r == nil || r.block == nil {
        panic("PopRequest() requires a valid block")
    }
    // SANITY CHECK END

    delete(pool.requests, pool.height)
    pool.height++

@@ -151,9 +153,11 @@ func (pool *BlockPool) RedoRequest(height int) {
    defer pool.requestsMtx.Unlock()

    request := pool.requests[height]
    // SANITY CHECK
    if request.block == nil {
        panic("Expected block to be non-nil")
    }
    // SANITY CHECK END
    // TODO: record this malfeasance
    // maybe punish peer on switch (an invalid block!)
    pool.RemovePeer(request.peerId) // Lock on peersMtx.
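A reduced sketch of the PopRequest bookkeeping guarded above: the caller must only pop once the block at pool.height has arrived, so a nil request or nil block is treated as a programmer error and panics. The blockRequest and blockPool types below are simplified stand-ins for the real pool:

package main

import (
    "fmt"
    "sync"
)

type blockRequest struct{ block *string }

type blockPool struct {
    mtx      sync.Mutex
    height   int
    requests map[int]*blockRequest
}

func (pool *blockPool) PopRequest() {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

    // SANITY CHECK: only pop once the block at pool.height has actually arrived.
    if r := pool.requests[pool.height]; r == nil || r.block == nil {
        panic("PopRequest() requires a valid block")
    }

    delete(pool.requests, pool.height)
    pool.height++
}

func main() {
    b := "block-1"
    pool := &blockPool{height: 1, requests: map[int]*blockRequest{1: {block: &b}}}
    pool.PopRequest()
    fmt.Println(pool.height) // 2
}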
@@ -54,10 +54,12 @@ type BlockchainReactor struct {
}

func NewBlockchainReactor(state *sm.State, store *BlockStore, sync bool) *BlockchainReactor {
    // SANITY CHECK
    if state.LastBlockHeight != store.Height() &&
        state.LastBlockHeight != store.Height()-1 { // XXX double check this logic.
        panic(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
    }
    // SANITY CHECK END
    requestsCh := make(chan BlockRequest, defaultChannelCapacity)
    timeoutsCh := make(chan string, defaultChannelCapacity)
    pool := NewBlockPool(
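The reactor constructor above enforces that the state's last block height and the block store's height differ by at most one (the store may be one block ahead before the state catches up). A minimal sketch of that invariant check with placeholder state and store types:

package main

import "fmt"

type state struct{ LastBlockHeight int }
type store struct{ height int }

func (s store) Height() int { return s.height }

// newBlockchainReactor refuses to start if the state and store heights have
// drifted apart by more than one block.
func newBlockchainReactor(st state, bs store) {
    if st.LastBlockHeight != bs.Height() &&
        st.LastBlockHeight != bs.Height()-1 {
        panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", st.LastBlockHeight, bs.Height()))
    }
    // ... set up request/timeout channels and the block pool ...
}

func main() {
    newBlockchainReactor(state{LastBlockHeight: 9}, store{height: 10}) // ok: store one block ahead
    fmt.Println("heights consistent")
}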
@@ -23,6 +23,8 @@ There are three types of information stored:
Currently the precommit signatures are duplicated in the Block parts as
well as the Validation. In the future this may change, perhaps by moving
the Validation data outside the Block.

Panics indicate probable corruption in the data
*/
type BlockStore struct {
    height int

@@ -55,10 +57,11 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
    var err error
    r := bs.GetReader(calcBlockMetaKey(height))
    if r == nil {
        panic(Fmt("Block does not exist at height %v", height))
        return nil
    }
    meta := binary.ReadBinary(&types.BlockMeta{}, r, &n, &err).(*types.BlockMeta)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Error reading block meta: %v", err))
    }
    bytez := []byte{}

@@ -68,6 +71,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
    }
    block := binary.ReadBinary(&types.Block{}, bytes.NewReader(bytez), &n, &err).(*types.Block)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Error reading block: %v", err))
    }
    return block

@@ -78,10 +82,11 @@ func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part {
    var err error
    r := bs.GetReader(calcBlockPartKey(height, index))
    if r == nil {
        panic(Fmt("BlockPart does not exist for height %v index %v", height, index))
        return nil
    }
    part := binary.ReadBinary(&types.Part{}, r, &n, &err).(*types.Part)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Error reading block part: %v", err))
    }
    return part

@@ -92,10 +97,11 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
    var err error
    r := bs.GetReader(calcBlockMetaKey(height))
    if r == nil {
        panic(Fmt("BlockMeta does not exist for height %v", height))
        return nil
    }
    meta := binary.ReadBinary(&types.BlockMeta{}, r, &n, &err).(*types.BlockMeta)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Error reading block meta: %v", err))
    }
    return meta

@@ -108,10 +114,11 @@ func (bs *BlockStore) LoadBlockValidation(height int) *types.Validation {
    var err error
    r := bs.GetReader(calcBlockValidationKey(height))
    if r == nil {
        panic(Fmt("BlockValidation does not exist for height %v", height))
        return nil
    }
    validation := binary.ReadBinary(&types.Validation{}, r, &n, &err).(*types.Validation)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Error reading validation: %v", err))
    }
    return validation

@@ -123,10 +130,11 @@ func (bs *BlockStore) LoadSeenValidation(height int) *types.Validation {
    var err error
    r := bs.GetReader(calcSeenValidationKey(height))
    if r == nil {
        panic(Fmt("SeenValidation does not exist for height %v", height))
        return nil
    }
    validation := binary.ReadBinary(&types.Validation{}, r, &n, &err).(*types.Validation)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Error reading validation: %v", err))
    }
    return validation

@@ -140,9 +148,11 @@ func (bs *BlockStore) LoadSeenValidation(height int) *types.Validation {
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenValidation *types.Validation) {
    height := block.Height
    if height != bs.height+1 {
        // SANITY CHECK
        panic(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
    }
    if !blockParts.IsComplete() {
        // SANITY CHECK
        panic(Fmt("BlockStore can only save complete block part sets"))
    }

@@ -172,9 +182,11 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
}

func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
    // SANITY CHECK
    if height != bs.height+1 {
        panic(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
    }
    // SANITY CHECK END
    partBytes := binary.BinaryBytes(part)
    bs.db.Set(calcBlockPartKey(height, index), partBytes)
}

@@ -208,6 +220,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
    bytes, err := json.Marshal(bsj)
    if err != nil {
        // SANITY CHECK
        panic(Fmt("Could not marshal state bytes: %v", err))
    }
    db.Set(blockStoreKey, bytes)

@@ -223,6 +236,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
    bsj := BlockStoreStateJSON{}
    err := json.Unmarshal(bytes, &bsj)
    if err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(Fmt("Could not unmarshal bytes: %X", bytes))
    }
    return bsj
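The last two hunks above persist BlockStoreStateJSON with encoding/json and treat a marshal failure as a bug and an unmarshal failure as stored-data corruption, both fatal. A self-contained sketch of that save/load pair, substituting an in-memory map for the real key-value DB:

package main

import (
    "encoding/json"
    "fmt"
)

type BlockStoreStateJSON struct {
    Height int
}

var blockStoreKey = []byte("blockStore")

// memDB is a stand-in for the real dbm.DB interface.
type memDB map[string][]byte

func (db memDB) Set(k, v []byte) { db[string(k)] = v }
func (db memDB) Get(k []byte) []byte { return db[string(k)] }

// A marshal failure here is a programming error, so it panics.
func (bsj BlockStoreStateJSON) Save(db memDB) {
    bytes, err := json.Marshal(bsj)
    if err != nil {
        panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
    }
    db.Set(blockStoreKey, bytes)
}

// An unmarshal failure means the stored bytes are corrupt, also fatal.
func LoadBlockStoreStateJSON(db memDB) BlockStoreStateJSON {
    bytes := db.Get(blockStoreKey)
    if bytes == nil {
        return BlockStoreStateJSON{Height: 0}
    }
    bsj := BlockStoreStateJSON{}
    if err := json.Unmarshal(bytes, &bsj); err != nil {
        panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
    }
    return bsj
}

func main() {
    db := memDB{}
    BlockStoreStateJSON{Height: 42}.Save(db)
    fmt.Println(LoadBlockStoreStateJSON(db).Height) // 42
}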
@@ -63,8 +63,8 @@ func (cache *BlockCache) GetAccount(addr []byte) *ac.Account {

func (cache *BlockCache) UpdateAccount(acc *ac.Account) {
    addr := acc.Address
    // SANITY CHECK
    _, storage, removed, _ := cache.accounts[string(addr)].unpack()
    // SANITY CHECK
    if removed {
        panic("UpdateAccount on a removed account")
    }

@@ -95,9 +95,11 @@ func (cache *BlockCache) GetStorage(addr Word256, key Word256) (value Word256) {

    // Get or load storage
    acc, storage, removed, dirty := cache.accounts[string(addr.Postfix(20))].unpack()
    // SANITY CHECK
    if removed {
        panic("GetStorage() on removed account")
    }
    // SANITY CHECK END
    if acc != nil && storage == nil {
        storage = makeStorage(cache.db, acc.StorageRoot)
        cache.accounts[string(addr.Postfix(20))] = accountInfo{acc, storage, false, dirty}

@@ -117,10 +119,12 @@ func (cache *BlockCache) GetStorage(addr Word256, key Word256) (value Word256) {

// NOTE: Set value to zero to removed from the trie.
func (cache *BlockCache) SetStorage(addr Word256, key Word256, value Word256) {
    // SANITY CHECK
    _, _, removed, _ := cache.accounts[string(addr.Postfix(20))].unpack()
    if removed {
        panic("SetStorage() on a removed account")
    }
    // SANITY CHECK END
    cache.storages[Tuple256{addr, key}] = storageInfo{value, true}
}

@@ -143,12 +147,6 @@ func (cache *BlockCache) GetNameRegEntry(name string) *types.NameRegEntry {

func (cache *BlockCache) UpdateNameRegEntry(entry *types.NameRegEntry) {
    name := entry.Name
    // SANITY CHECK
    _, removed, _ := cache.names[name].unpack()
    if removed {
        panic("UpdateNameRegEntry on a removed name")
    }
    // SANITY CHECK END
    cache.names[name] = nameInfo{entry, false, true}
}

@@ -224,6 +222,7 @@ func (cache *BlockCache) Sync() {
    if removed {
        removed := cache.backend.RemoveAccount(acc.Address)
        if !removed {
            // SOMETHING HORRIBLE HAS GONE WRONG
            panic(Fmt("Could not remove account to be removed: %X", acc.Address))
        }
    } else {

@@ -257,6 +256,7 @@ func (cache *BlockCache) Sync() {
    if removed {
        removed := cache.backend.RemoveNameRegEntry(nameStr)
        if !removed {
            // SOMETHING HORRIBLE HAS GONE WRONG
            panic(Fmt("Could not remove namereg entry to be removed: %s", nameStr))
        }
    } else {
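The BlockCache hunks above all follow the same unpack-then-check pattern: once an entry is marked removed in the cache, any further update is a logic error. A toy sketch of that pattern; accountInfo here carries only three fields rather than the real four, and the account payload is just a string:

package main

import "fmt"

type accountInfo struct {
    account *string
    removed bool
    dirty   bool
}

func (i accountInfo) unpack() (*string, bool, bool) { return i.account, i.removed, i.dirty }

type blockCache struct {
    accounts map[string]accountInfo
}

// UpdateAccount refuses to touch an entry that has already been marked removed.
func (cache *blockCache) UpdateAccount(addr string, acc string) {
    _, removed, _ := cache.accounts[addr].unpack()
    // SANITY CHECK
    if removed {
        panic("UpdateAccount on a removed account")
    }
    cache.accounts[addr] = accountInfo{account: &acc, removed: false, dirty: true}
}

func main() {
    cache := &blockCache{accounts: map[string]accountInfo{}}
    cache.UpdateAccount("addr1", "balance=10")
    fmt.Println(*cache.accounts["addr1"].account) // balance=10
}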
@@ -3,7 +3,6 @@ package state
import (
    "bytes"
    "errors"
    "fmt"

    "github.com/tendermint/tendermint/account"
    . "github.com/tendermint/tendermint/common"

@@ -22,8 +21,8 @@ func ExecBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeade
    // State.Hash should match block.StateHash
    stateHash := s.Hash()
    if !bytes.Equal(stateHash, block.StateHash) {
        return fmt.Errorf("Invalid state hash. Expected %X, got %X",
            stateHash, block.StateHash)
        return errors.New(Fmt("Invalid state hash. Expected %X, got %X",
            stateHash, block.StateHash))
    }
    return nil
}

@@ -56,6 +55,7 @@ func execBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeade
    }

    // Update Validator.LastCommitHeight as necessary.
    // If we panic in here, something has gone horribly wrong
    for i, precommit := range block.LastValidation.Precommits {
        if precommit == nil {
            continue

@@ -194,9 +194,11 @@ func checkInputPubKey(acc *account.Account, in *types.TxInput) error {
func validateInputs(accounts map[string]*account.Account, signBytes []byte, ins []*types.TxInput) (total int64, err error) {
    for _, in := range ins {
        acc := accounts[string(in.Address)]
        // SANITY CHECK
        if acc == nil {
            panic("validateInputs() expects account in accounts")
        }
        // SANITY CHECK END
        err = validateInput(acc, signBytes, in)
        if err != nil {
            return

@@ -245,12 +247,14 @@ func validateOutputs(outs []*types.TxOutput) (total int64, err error) {
func adjustByInputs(accounts map[string]*account.Account, ins []*types.TxInput) {
    for _, in := range ins {
        acc := accounts[string(in.Address)]
        // SANITY CHECK
        if acc == nil {
            panic("adjustByInputs() expects account in accounts")
        }
        if acc.Balance < in.Amount {
            panic("adjustByInputs() expects sufficient funds")
        }
        // SANITY CHECK END
        acc.Balance -= in.Amount
        acc.Sequence += 1
    }

@@ -259,9 +263,11 @@ func adjustByInputs(accounts map[string]*account.Account, ins []*types.TxInput)
func adjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutput) {
    for _, out := range outs {
        acc := accounts[string(out.Address)]
        // SANITY CHECK
        if acc == nil {
            panic("adjustByOutputs() expects account in accounts")
        }
        // SANITY CHECK END
        acc.Balance += out.Amount
    }
}

@@ -270,6 +276,14 @@ func adjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutpu
// Unlike ExecBlock(), state will not be altered.
func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Fireable) error {

    defer func() {
        if r := recover(); r != nil {
            err := errors.New(Fmt("Recovered from panic in ExecTx", "err", r, "tx", tx_))
            log.Error(err.Error())
            // TODO return error
        }
    }()

    // TODO: do something with fees
    fees := int64(0)
    _s := blockCache.State() // hack to access validators and block height

@@ -522,7 +536,7 @@ func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Firea
    // and changing the data
    if expired {
        if expiresIn < types.MinNameRegistrationPeriod {
            return fmt.Errorf("Names must be registered for at least %d blocks", types.MinNameRegistrationPeriod)
            return errors.New(Fmt("Names must be registered for at least %d blocks", types.MinNameRegistrationPeriod))
        }
        entry.Expires = lastBlockHeight + expiresIn
        entry.Owner = tx.Input.Address

@@ -534,7 +548,7 @@ func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Firea
    credit := oldCredit + value
    expiresIn = int(credit / costPerBlock)
    if expiresIn < types.MinNameRegistrationPeriod {
        return fmt.Errorf("Names must be registered for at least %d blocks", types.MinNameRegistrationPeriod)
        return errors.New(Fmt("Names must be registered for at least %d blocks", types.MinNameRegistrationPeriod))
    }
    entry.Expires = lastBlockHeight + expiresIn
    log.Debug("Updated namereg entry", "name", entry.Name, "expiresIn", expiresIn, "oldCredit", oldCredit, "value", value, "credit", credit)

@@ -544,7 +558,7 @@ func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Firea
    }
    } else {
        if expiresIn < types.MinNameRegistrationPeriod {
            return fmt.Errorf("Names must be registered for at least %d blocks", types.MinNameRegistrationPeriod)
            return errors.New(Fmt("Names must be registered for at least %d blocks", types.MinNameRegistrationPeriod))
        }
        // entry does not exist, so create it
        entry = &types.NameRegEntry{

@@ -623,6 +637,7 @@ func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Firea
        Accum: 0,
    })
    if !added {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic("Failed to add validator")
    }
    if evc != nil {

@@ -720,6 +735,8 @@ func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool, evc events.Firea
        return nil

    default:
        // SANITY CHECK (binary decoding should catch bad tx types
        // before they get here
        panic("Unknown Tx type")
    }
}
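ExecTx above installs a defer/recover guard so that a sanity-check panic deep in transaction processing is caught and logged rather than crashing the node; returning the error is still a TODO in the diff. A sketch of the usual completion of that pattern, converting the recovered panic into the function's error result through a named return value (this is an assumed follow-up, not what the commit itself does):

package main

import (
    "errors"
    "fmt"
)

// execTx recovers any panic raised while processing the transaction and turns
// it into an ordinary error via the named return value.
func execTx(tx string) (err error) {
    defer func() {
        if r := recover(); r != nil {
            err = errors.New(fmt.Sprintf("Recovered from panic in ExecTx: %v (tx: %v)", r, tx))
        }
    }()

    if tx == "bad" {
        panic("adjustByInputs() expects sufficient funds") // a sanity check deep in the call tree
    }
    return nil
}

func main() {
    fmt.Println(execTx("good")) // <nil>
    fmt.Println(execTx("bad"))  // Recovered from panic in ExecTx: ...
}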
@@ -2,6 +2,7 @@ package state

import (
    "io/ioutil"
    "os"
    "time"

    "github.com/tendermint/tendermint/account"

@@ -34,7 +35,8 @@ func GenesisDocFromJSON(jsonBlob []byte) (genState *GenesisDoc) {
    var err error
    binary.ReadJSON(&genState, jsonBlob, &err)
    if err != nil {
        panic(Fmt("Couldn't read GenesisDoc: %v", err))
        log.Error(Fmt("Couldn't read GenesisDoc: %v", err))
        os.Exit(1)
    }
    return
}

@@ -42,7 +44,8 @@ func GenesisDocFromJSON(jsonBlob []byte) (genState *GenesisDoc) {
func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) *State {
    jsonBlob, err := ioutil.ReadFile(genDocFile)
    if err != nil {
        panic(Fmt("Couldn't read GenesisDoc file: %v", err))
        log.Error(Fmt("Couldn't read GenesisDoc file: %v", err))
        os.Exit(1)
    }
    genDoc := GenesisDocFromJSON(jsonBlob)
    return MakeGenesisState(db, genDoc)
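The genesis hunks above replace panics with a logged error plus os.Exit(1), since a missing or malformed genesis file is an operator error rather than a bug. A minimal sketch of that choice using the standard library logger; the real code uses tendermint's own log package and binary.ReadJSON, which are not reproduced here:

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "os"
)

// mustReadGenesisFile logs and exits instead of panicking: a missing genesis
// file is an operator error, and a stack trace would only add noise.
func mustReadGenesisFile(path string) []byte {
    jsonBlob, err := ioutil.ReadFile(path)
    if err != nil {
        log.Printf("Couldn't read GenesisDoc file: %v", err)
        os.Exit(1)
    }
    return jsonBlob
}

func main() {
    tmp, _ := ioutil.TempFile("", "genesis")
    tmp.WriteString(`{"chain_id":"test-chain"}`)
    tmp.Close()
    defer os.Remove(tmp.Name())

    fmt.Println(string(mustReadGenesisFile(tmp.Name())))
}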
@@ -30,6 +30,8 @@ func voteToStep(vote *types.Vote) int8 {
    case types.VoteTypePrecommit:
        return stepPrecommit
    default:
        // SANITY CHECK (binary decoding should catch bad vote types
        // before they get here (right?!)
        panic("Unknown vote type")
    }
}

@@ -69,7 +71,7 @@ func GenPrivValidator() *PrivValidator {
func LoadPrivValidator(filePath string) *PrivValidator {
    privValJSONBytes, err := ioutil.ReadFile(filePath)
    if err != nil {
        panic(err)
        Exit(err.Error())
    }
    privVal := binary.ReadJSON(&PrivValidator{}, privValJSONBytes, &err).(*PrivValidator)
    if err != nil {

@@ -93,6 +95,7 @@ func (privVal *PrivValidator) Save() {

func (privVal *PrivValidator) save() {
    if privVal.filePath == "" {
        // SANITY CHECK
        panic("Cannot save PrivValidator: filePath not set")
    }
    jsonBytes := binary.JSONBytes(privVal)
@@ -2,12 +2,12 @@ package state

import (
    "bytes"
    "fmt"
    "io"
    "time"

    "github.com/tendermint/tendermint/account"
    "github.com/tendermint/tendermint/binary"
    . "github.com/tendermint/tendermint/common"
    dbm "github.com/tendermint/tendermint/db"
    "github.com/tendermint/tendermint/events"
    "github.com/tendermint/tendermint/merkle"

@@ -67,7 +67,8 @@ func LoadState(db dbm.DB) *State {
        s.nameReg = merkle.NewIAVLTree(binary.BasicCodec, NameRegCodec, 0, db)
        s.nameReg.Load(nameRegHash)
        if *err != nil {
            panic(*err)
            // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
            Exit(Fmt("Data has been corrupted or its spec has changed: %v\n", *err))
        }
        // TODO: ensure that buf is completely read.
    }

@@ -91,6 +92,7 @@ func (s *State) Save() {
    binary.WriteByteSlice(s.validatorInfos.Hash(), buf, n, err)
    binary.WriteByteSlice(s.nameReg.Hash(), buf, n, err)
    if *err != nil {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic(*err)
    }
    s.DB.Set(stateKey, buf.Bytes())

@@ -145,6 +147,7 @@ func (s *State) ComputeBlockStateHash(block *types.Block) error {
//-------------------------------------
// State.accounts

// Returns nil if account does not exist with given address.
// The returned Account is a copy, so mutating it
// has no side effects.
// Implements Statelike

@@ -200,11 +203,13 @@ func (s *State) unbondValidator(val *Validator) {
    // Move validator to UnbondingValidators
    val, removed := s.BondedValidators.Remove(val.Address)
    if !removed {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic("Couldn't remove validator for unbonding")
    }
    val.UnbondHeight = s.LastBlockHeight + 1
    added := s.UnbondingValidators.Add(val)
    if !added {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic("Couldn't add validator for unbonding")
    }
}

@@ -213,11 +218,13 @@ func (s *State) rebondValidator(val *Validator) {
    // Move validator to BondingValidators
    val, removed := s.UnbondingValidators.Remove(val.Address)
    if !removed {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic("Couldn't remove validator for rebonding")
    }
    val.BondHeight = s.LastBlockHeight + 1
    added := s.BondedValidators.Add(val)
    if !added {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic("Couldn't add validator for rebonding")
    }
}

@@ -225,17 +232,21 @@ func (s *State) rebondValidator(val *Validator) {
func (s *State) releaseValidator(val *Validator) {
    // Update validatorInfo
    valInfo := s.GetValidatorInfo(val.Address)
    // SANITY CHECK
    if valInfo == nil {
        panic("Couldn't find validatorInfo for release")
    }
    // SANITY CHECK END
    valInfo.ReleasedHeight = s.LastBlockHeight + 1
    s.SetValidatorInfo(valInfo)

    // Send coins back to UnbondTo outputs
    accounts, err := getOrMakeAccounts(s, nil, valInfo.UnbondTo)
    // SANITY CHECK
    if err != nil {
        panic("Couldn't get or make unbondTo accounts")
    }
    // SANITY CHECK END
    adjustByOutputs(accounts, valInfo.UnbondTo)
    for _, acc := range accounts {
        s.UpdateAccount(acc)

@@ -244,6 +255,7 @@ func (s *State) releaseValidator(val *Validator) {
    // Remove validator from UnbondingValidators
    _, removed := s.UnbondingValidators.Remove(val.Address)
    if !removed {
        // SOMETHING HAS GONE HORRIBLY WRONG
        panic("Couldn't remove validator for release")
    }
}

@@ -251,9 +263,11 @@ func (s *State) releaseValidator(val *Validator) {
func (s *State) destroyValidator(val *Validator) {
    // Update validatorInfo
    valInfo := s.GetValidatorInfo(val.Address)
    // SANITY CHECK
    if valInfo == nil {
        panic("Couldn't find validatorInfo for release")
    }
    // SANITY CHECK END
    valInfo.DestroyedHeight = s.LastBlockHeight + 1
    valInfo.DestroyedAmount = val.VotingPower
    s.SetValidatorInfo(valInfo)

@@ -263,6 +277,7 @@ func (s *State) destroyValidator(val *Validator) {
    if !removed {
        _, removed := s.UnbondingValidators.Remove(val.Address)
        if !removed {
            // SOMETHING HAS GONE HORRIBLY WRONG
            panic("Couldn't remove validator for destruction")
        }
    }

@@ -334,5 +349,5 @@ type InvalidTxError struct {
}

func (txErr InvalidTxError) Error() string {
    return fmt.Sprintf("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason)
    return Fmt("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason)
}
@@ -83,6 +83,7 @@ func (cache *TxCache) CreateAccount(creator *vm.Account) *vm.Account {
        cache.accounts[addr] = vmAccountInfo{account, false}
        return account
    } else {
        // NONCE HANDLING SANITY CHECK OR SHA3 IS BROKEN
        panic(Fmt("Could not create account, address already exists: %X", addr))
    }
}

@@ -104,10 +105,12 @@ func (cache *TxCache) GetStorage(addr Word256, key Word256) Word256 {

// NOTE: Set value to zero to removed from the trie.
func (cache *TxCache) SetStorage(addr Word256, key Word256, value Word256) {
    // SANITY CHECK
    _, removed := vmUnpack(cache.accounts[addr])
    if removed {
        panic("SetStorage() on a removed account")
    }
    // SANITY CHECK END
    cache.storages[Tuple256{addr, key}] = value
}
@@ -76,6 +76,7 @@ func (v *Validator) CompareAccum(other *Validator) *Validator {
    } else if bytes.Compare(v.Address, other.Address) > 0 {
        return other
    } else {
        // SANITY CHECK
        panic("Cannot compare identical validators")
    }
}
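CompareAccum above breaks ties on accumulated priority by address and insists that two distinct validators never compare as fully identical, hence the sanity-check panic in the final branch. A standalone sketch with a pared-down validator type:

package main

import (
    "bytes"
    "fmt"
)

type validator struct {
    Address []byte
    Accum   int64
}

// CompareAccum returns the validator with higher priority; ties are broken by
// address, and two fully identical validators are a logic error.
func (v *validator) CompareAccum(other *validator) *validator {
    if v.Accum > other.Accum {
        return v
    } else if v.Accum < other.Accum {
        return other
    } else if bytes.Compare(v.Address, other.Address) < 0 {
        return v
    } else if bytes.Compare(v.Address, other.Address) > 0 {
        return other
    } else {
        panic("Cannot compare identical validators")
    }
}

func main() {
    a := &validator{Address: []byte{0x01}, Accum: 5}
    b := &validator{Address: []byte{0x02}, Accum: 5}
    fmt.Printf("%X\n", a.CompareAccum(b).Address) // 01: lower address wins the tie
}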