Merge branch 'statecache' into rpc

Ethan Buchman
2015-03-29 18:03:03 -07:00
45 changed files with 2668 additions and 2037 deletions


@ -38,13 +38,13 @@ type Account struct {
StorageRoot []byte // VM storage merkle root. StorageRoot []byte // VM storage merkle root.
} }
func (account *Account) Copy() *Account { func (acc *Account) Copy() *Account {
accountCopy := *account accCopy := *acc
return &accountCopy return &accCopy
} }
func (account *Account) String() string { func (acc *Account) String() string {
return fmt.Sprintf("Account{%X:%v C:%v S:%X}", account.Address, account.PubKey, len(account.Code), account.StorageRoot) return fmt.Sprintf("Account{%X:%v C:%v S:%X}", acc.Address, acc.PubKey, len(acc.Code), acc.StorageRoot)
} }
func AccountEncoder(o interface{}, w io.Writer, n *int64, err *error) { func AccountEncoder(o interface{}, w io.Writer, n *int64, err *error) {


@ -2,6 +2,7 @@ package binary
import ( import (
"bytes" "bytes"
"fmt"
"reflect" "reflect"
"testing" "testing"
"time" "time"
@ -58,6 +59,35 @@ var _ = RegisterInterface(
ConcreteType{&Viper{}}, ConcreteType{&Viper{}},
) )
func TestAnimalInterface(t *testing.T) {
var foo Animal
// Type of pointer to Animal
rt := reflect.TypeOf(&foo)
fmt.Printf("rt: %v\n", rt)
// Type of Animal itself.
// NOTE: normally this is acquired through other means
// like introspecting on method signatures, or struct fields.
rte := rt.Elem()
fmt.Printf("rte: %v\n", rte)
// Get a new pointer to the interface
// NOTE: calling .Interface() is to get the actual value,
// instead of reflection values.
ptr := reflect.New(rte).Interface()
fmt.Printf("ptr: %v", ptr)
// Make a binary byteslice that represents a snake.
snakeBytes := BinaryBytes(Snake([]byte("snake")))
snakeReader := bytes.NewReader(snakeBytes)
// Now you can read it.
n, err := new(int64), new(error)
it := *ReadBinary(ptr, snakeReader, n, err).(*Animal)
fmt.Println(it, reflect.TypeOf(it))
}
//------------------------------------- //-------------------------------------
type Constructor func() interface{} type Constructor func() interface{}
@ -287,9 +317,9 @@ func validateComplexArray(o interface{}, t *testing.T) {
var testCases = []TestCase{} var testCases = []TestCase{}
func init() { func init() {
//testCases = append(testCases, TestCase{constructBasic, instantiateBasic, validateBasic}) testCases = append(testCases, TestCase{constructBasic, instantiateBasic, validateBasic})
//testCases = append(testCases, TestCase{constructComplex, instantiateComplex, validateComplex}) testCases = append(testCases, TestCase{constructComplex, instantiateComplex, validateComplex})
//testCases = append(testCases, TestCase{constructComplex2, instantiateComplex2, validateComplex2}) testCases = append(testCases, TestCase{constructComplex2, instantiateComplex2, validateComplex2})
testCases = append(testCases, TestCase{constructComplexArray, instantiateComplexArray, validateComplexArray}) testCases = append(testCases, TestCase{constructComplexArray, instantiateComplexArray, validateComplexArray})
} }


@ -1,7 +1,7 @@
package blockchain package blockchain
import ( import (
"math/rand" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -10,345 +10,367 @@ import (
) )
const ( const (
maxOutstandingRequestsPerPeer = 10 maxTries = 3
eventsChannelCapacity = 100 inputsChannelCapacity = 200
requestTimeoutSeconds = 10 requestIntervalMS = 500
maxTries = 3 maxPendingRequests = 200
requestIntervalMS = 500 maxTotalRequests = 300
requestBatchSize = 50 maxRequestsPerPeer = 300
maxPendingRequests = 50
maxTotalRequests = 100
maxPeersPerRequest = 1
) )
type BlockRequest struct { var (
Height uint requestTimeoutSeconds = time.Duration(1)
PeerId string )
}
type BlockPool struct { type BlockPool struct {
peers map[string]*bpPeer // block requests
blockInfos map[uint]*bpBlockInfo requestsMtx sync.Mutex
height uint // the lowest key in blockInfos. requests map[uint]*bpRequest
started int32 // atomic height uint // the lowest key in requests.
stopped int32 // atomic numPending int32
numPending int32 numTotal int32
numTotal int32
eventsCh chan interface{} // internal events. // peers
requestsCh chan<- BlockRequest // output of new requests to make. peersMtx sync.Mutex
timeoutsCh chan<- string // output of peers that timed out. peers map[string]*bpPeer
blocksCh chan<- *types.Block // output of ordered blocks.
repeater *RepeatTimer // for requesting more blocks. requestsCh chan<- BlockRequest
quit chan struct{} timeoutsCh chan<- string
repeater *RepeatTimer
running int32 // atomic
} }
func NewBlockPool(start uint, timeoutsCh chan<- string, requestsCh chan<- BlockRequest, blocksCh chan<- *types.Block) *BlockPool { func NewBlockPool(start uint, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool {
return &BlockPool{ return &BlockPool{
peers: make(map[string]*bpPeer), peers: make(map[string]*bpPeer),
blockInfos: make(map[uint]*bpBlockInfo),
requests: make(map[uint]*bpRequest),
height: start, height: start,
started: 0,
stopped: 0,
numPending: 0, numPending: 0,
numTotal: 0, numTotal: 0,
quit: make(chan struct{}),
eventsCh: make(chan interface{}, eventsChannelCapacity),
requestsCh: requestsCh, requestsCh: requestsCh,
timeoutsCh: timeoutsCh, timeoutsCh: timeoutsCh,
blocksCh: blocksCh,
repeater: NewRepeatTimer("", requestIntervalMS*time.Millisecond), repeater: NewRepeatTimer("", requestIntervalMS*time.Millisecond),
running: 0,
} }
} }
func (bp *BlockPool) Start() { func (pool *BlockPool) Start() {
if atomic.CompareAndSwapInt32(&bp.started, 0, 1) { if atomic.CompareAndSwapInt32(&pool.running, 0, 1) {
log.Info("Starting BlockPool") log.Info("Starting BlockPool")
go bp.run() go pool.run()
} }
} }
func (bp *BlockPool) Stop() { func (pool *BlockPool) Stop() {
if atomic.CompareAndSwapInt32(&bp.stopped, 0, 1) { if atomic.CompareAndSwapInt32(&pool.running, 1, 0) {
log.Info("Stopping BlockPool") log.Info("Stopping BlockPool")
close(bp.quit) pool.repeater.Stop()
close(bp.eventsCh)
close(bp.requestsCh)
close(bp.timeoutsCh)
close(bp.blocksCh)
bp.repeater.Stop()
} }
} }
// AddBlock should be called when a block is received. func (pool *BlockPool) IsRunning() bool {
func (bp *BlockPool) AddBlock(block *types.Block, peerId string) { return atomic.LoadInt32(&pool.running) == 1
bp.eventsCh <- bpBlockResponse{block, peerId}
} }
func (bp *BlockPool) SetPeerStatus(peerId string, height uint) { // Run spawns requests as needed.
bp.eventsCh <- bpPeerStatus{peerId, height} func (pool *BlockPool) run() {
} RUN_LOOP:
// Runs in a goroutine and processes messages.
func (bp *BlockPool) run() {
FOR_LOOP:
for { for {
select { if atomic.LoadInt32(&pool.running) == 0 {
case msg := <-bp.eventsCh: break RUN_LOOP
bp.handleEvent(msg)
case <-bp.repeater.Ch:
bp.makeMoreBlockInfos()
bp.requestBlocksFromRandomPeers(10)
case <-bp.quit:
break FOR_LOOP
} }
} _, numPending, numTotal := pool.GetStatus()
} if numPending >= maxPendingRequests {
// sleep for a bit.
func (bp *BlockPool) handleEvent(event_ interface{}) { time.Sleep(requestIntervalMS * time.Millisecond)
switch event := event_.(type) { } else if numTotal >= maxTotalRequests {
case bpBlockResponse: // sleep for a bit.
peer := bp.peers[event.peerId] time.Sleep(requestIntervalMS * time.Millisecond)
blockInfo := bp.blockInfos[event.block.Height]
if blockInfo == nil {
// block was unwanted.
if peer != nil {
peer.bad++
}
} else { } else {
// block was wanted. // request for more blocks.
if peer != nil { height := pool.nextHeight()
peer.good++ pool.makeRequest(height)
}
delete(peer.requests, event.block.Height)
if blockInfo.block == nil {
// peer is the first to give it to us.
blockInfo.block = event.block
blockInfo.blockBy = peer.id
bp.numPending--
if event.block.Height == bp.height {
go bp.pushBlocksFromStart()
}
}
}
case bpPeerStatus: // updated or new status from peer
// request blocks if possible.
peer := bp.peers[event.peerId]
if peer == nil {
peer = bpNewPeer(event.peerId, event.height)
bp.peers[peer.id] = peer
}
bp.requestBlocksFromPeer(peer)
case bpRequestTimeout: // unconditional timeout for each peer's request.
peer := bp.peers[event.peerId]
if peer == nil {
// cleanup was already handled.
return
}
height := event.height
request := peer.requests[height]
if request == nil || request.block != nil {
// the request was fulfilled by some peer or this peer.
return
} }
}
}
// A request for peer timed out. func (pool *BlockPool) GetStatus() (uint, int32, int32) {
peer.bad++ pool.requestsMtx.Lock() // Lock
if request.tries < maxTries { defer pool.requestsMtx.Unlock()
log.Warn("Timeout: Trying again.", "tries", request.tries, "peerId", peer.id)
// try again. return pool.height, pool.numPending, pool.numTotal
select { }
case bp.requestsCh <- BlockRequest{height, peer.id}:
request.startAndTimeoutTo(bp.eventsCh) // also bumps request.tries // We need to see the second block's Validation to validate the first block.
default: // So we peek two blocks at a time.
// The request cannot be made because requestCh is full. func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
// Just delete the request. pool.requestsMtx.Lock() // Lock
delete(peer.requests, height) defer pool.requestsMtx.Unlock()
}
if r := pool.requests[pool.height]; r != nil {
first = r.block
}
if r := pool.requests[pool.height+1]; r != nil {
second = r.block
}
return
}
// Pop the first block at pool.height
// It must have been validated by 'second'.Validation from PeekTwoBlocks().
func (pool *BlockPool) PopRequest() {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
if r := pool.requests[pool.height]; r == nil || r.block == nil {
panic("PopRequest() requires a valid block")
}
delete(pool.requests, pool.height)
pool.height++
pool.numTotal--
}
// Invalidates the block at pool.height.
// Remove the peer and request from others.
func (pool *BlockPool) RedoRequest(height uint) {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
request := pool.requests[height]
if request.block == nil {
panic("Expected block to be non-nil")
}
pool.RemovePeer(request.peerId) // Lock on peersMtx.
request.block = nil
request.peerId = ""
pool.numPending++
go requestRoutine(pool, height)
}
func (pool *BlockPool) hasBlock(height uint) bool {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
request := pool.requests[height]
return request != nil && request.block != nil
}
func (pool *BlockPool) setPeerForRequest(height uint, peerId string) {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
request := pool.requests[height]
if request == nil {
return
}
request.peerId = peerId
}
func (pool *BlockPool) AddBlock(block *types.Block, peerId string) {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
request := pool.requests[block.Height]
if request == nil {
return
}
if request.peerId != peerId {
return
}
if request.block != nil {
return
}
request.block = block
pool.numPending--
}
func (pool *BlockPool) getPeer(peerId string) *bpPeer {
pool.peersMtx.Lock() // Lock
defer pool.peersMtx.Unlock()
peer := pool.peers[peerId]
return peer
}
// Sets the peer's blockchain height.
func (pool *BlockPool) SetPeerHeight(peerId string, height uint) {
pool.peersMtx.Lock() // Lock
defer pool.peersMtx.Unlock()
peer := pool.peers[peerId]
if peer != nil {
peer.height = height
} else {
peer = &bpPeer{
height: height,
id: peerId,
numRequests: 0,
}
pool.peers[peerId] = peer
}
}
func (pool *BlockPool) RemovePeer(peerId string) {
pool.peersMtx.Lock() // Lock
defer pool.peersMtx.Unlock()
delete(pool.peers, peerId)
}
// Pick an available peer with at least the given minHeight.
// If no peers are available, returns nil.
func (pool *BlockPool) pickIncrAvailablePeer(minHeight uint) *bpPeer {
pool.peersMtx.Lock()
defer pool.peersMtx.Unlock()
for _, peer := range pool.peers {
if peer.numRequests >= maxRequestsPerPeer {
continue
}
if peer.height < minHeight {
continue
}
peer.numRequests++
return peer
}
return nil
}
func (pool *BlockPool) decrPeer(peerId string) {
pool.peersMtx.Lock()
defer pool.peersMtx.Unlock()
peer := pool.peers[peerId]
if peer == nil {
return
}
peer.numRequests--
}
func (pool *BlockPool) nextHeight() uint {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
return pool.height + uint(pool.numTotal)
}
func (pool *BlockPool) makeRequest(height uint) {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
request := &bpRequest{
height: height,
peerId: "",
block: nil,
}
pool.requests[height] = request
nextHeight := pool.height + uint(pool.numTotal)
if nextHeight == height {
pool.numTotal++
pool.numPending++
}
go requestRoutine(pool, height)
}
func (pool *BlockPool) sendRequest(height uint, peerId string) {
if atomic.LoadInt32(&pool.running) == 0 {
return
}
pool.requestsCh <- BlockRequest{height, peerId}
}
func (pool *BlockPool) sendTimeout(peerId string) {
if atomic.LoadInt32(&pool.running) == 0 {
return
}
pool.timeoutsCh <- peerId
}
func (pool *BlockPool) debug() string {
pool.requestsMtx.Lock() // Lock
defer pool.requestsMtx.Unlock()
str := ""
for h := pool.height; h < pool.height+uint(pool.numTotal); h++ {
if pool.requests[h] == nil {
str += Fmt("H(%v):X ", h)
} else { } else {
log.Warn("Timeout: Deleting request") str += Fmt("H(%v):", h)
// delete the request. str += Fmt("B?(%v) ", pool.requests[h].block != nil)
delete(peer.requests, height)
blockInfo := bp.blockInfos[height]
if blockInfo != nil {
delete(blockInfo.requests, peer.id)
}
select {
case bp.timeoutsCh <- peer.id:
default:
}
} }
} }
} return str
// NOTE: This function is sufficient, but we should find pending blocks
// and sample the peers in one go rather than the current O(n^2) impl.
func (bp *BlockPool) requestBlocksFromRandomPeers(maxPeers int) {
chosen := bp.pickAvailablePeers(maxPeers)
log.Debug("requestBlocksFromRandomPeers", "chosen", len(chosen))
for _, peer := range chosen {
bp.requestBlocksFromPeer(peer)
}
}
func (bp *BlockPool) requestBlocksFromPeer(peer *bpPeer) {
// If peer is available and can provide something...
for height := bp.height; peer.available(); height++ {
blockInfo := bp.blockInfos[height]
if blockInfo == nil {
// We're out of range.
return
}
needsMorePeers := blockInfo.needsMorePeers()
alreadyAskedPeer := blockInfo.requests[peer.id] != nil
if needsMorePeers && !alreadyAskedPeer {
select {
case bp.requestsCh <- BlockRequest{height, peer.id}:
// Create a new request and start the timer.
request := &bpBlockRequest{
height: height,
peer: peer,
}
blockInfo.requests[peer.id] = request
peer.requests[height] = request
request.startAndTimeoutTo(bp.eventsCh) // also bumps request.tries
default:
// The request cannot be made because requestCh is full.
// Just stop.
return
}
}
}
}
func (bp *BlockPool) makeMoreBlockInfos() {
// make more requests if necessary.
for i := 0; i < requestBatchSize; i++ {
//log.Debug("Confused?",
// "numPending", bp.numPending, "maxPendingRequests", maxPendingRequests, "numtotal", bp.numTotal, "maxTotalRequests", maxTotalRequests)
if bp.numPending < maxPendingRequests && bp.numTotal < maxTotalRequests {
// Make a request for the next block height
requestHeight := bp.height + uint(bp.numTotal)
log.Debug("New blockInfo", "height", requestHeight)
blockInfo := bpNewBlockInfo(requestHeight)
bp.blockInfos[requestHeight] = blockInfo
bp.numPending++
bp.numTotal++
} else {
break
}
}
}
func (bp *BlockPool) pickAvailablePeers(choose int) []*bpPeer {
available := []*bpPeer{}
for _, peer := range bp.peers {
if peer.available() {
available = append(available, peer)
}
}
perm := rand.Perm(MinInt(choose, len(available)))
chosen := make([]*bpPeer, len(perm))
for i, idx := range perm {
chosen[i] = available[idx]
}
return chosen
}
// blocking
func (bp *BlockPool) pushBlocksFromStart() {
for height := bp.height; ; height++ {
// push block to blocksCh.
blockInfo := bp.blockInfos[height]
if blockInfo == nil || blockInfo.block == nil {
break
}
bp.numTotal--
bp.height++
delete(bp.blockInfos, height)
bp.blocksCh <- blockInfo.block
}
}
//-----------------------------------------------------------------------------
type bpBlockInfo struct {
height uint
requests map[string]*bpBlockRequest
block *types.Block // first block received
blockBy string // peerId of source
}
func bpNewBlockInfo(height uint) *bpBlockInfo {
return &bpBlockInfo{
height: height,
requests: make(map[string]*bpBlockRequest),
}
}
func (blockInfo *bpBlockInfo) needsMorePeers() bool {
return len(blockInfo.requests) < maxPeersPerRequest
}
//-------------------------------------
type bpBlockRequest struct {
peer *bpPeer
height uint
block *types.Block
tries int
}
// bump tries++ and set timeout.
// NOTE: the timer is unconditional.
func (request *bpBlockRequest) startAndTimeoutTo(eventsCh chan<- interface{}) {
request.tries++
time.AfterFunc(requestTimeoutSeconds*time.Second, func() {
eventsCh <- bpRequestTimeout{
peerId: request.peer.id,
height: request.height,
}
})
} }
//------------------------------------- //-------------------------------------
type bpPeer struct { type bpPeer struct {
id string id string
height uint height uint
requests map[uint]*bpBlockRequest numRequests int32
// Count good/bad events from peer.
good uint
bad uint
} }
func bpNewPeer(peerId string, height uint) *bpPeer { type bpRequest struct {
return &bpPeer{ height uint
id: peerId, peerId string
height: height, block *types.Block
requests: make(map[uint]*bpBlockRequest),
}
}
func (peer *bpPeer) available() bool {
return len(peer.requests) < maxOutstandingRequestsPerPeer
} }
//------------------------------------- //-------------------------------------
// bp.eventsCh messages
type bpBlockResponse struct { // Responsible for making more requests as necessary
block *types.Block // Returns when a block is found (e.g. AddBlock() is called)
peerId string func requestRoutine(pool *BlockPool, height uint) {
for {
var peer *bpPeer = nil
PICK_LOOP:
for {
if !pool.IsRunning() {
log.Debug("BlockPool not running. Stopping requestRoutine", "height", height)
return
}
peer = pool.pickIncrAvailablePeer(height)
if peer == nil {
//log.Debug("No peers available", "height", height)
time.Sleep(requestIntervalMS * time.Millisecond)
continue PICK_LOOP
}
break PICK_LOOP
}
pool.setPeerForRequest(height, peer.id)
for try := 0; try < maxTries; try++ {
pool.sendRequest(height, peer.id)
time.Sleep(requestTimeoutSeconds * time.Second)
if pool.hasBlock(height) {
pool.decrPeer(peer.id)
return
}
bpHeight, _, _ := pool.GetStatus()
if height < bpHeight {
pool.decrPeer(peer.id)
return
}
}
pool.RemovePeer(peer.id)
pool.sendTimeout(peer.id)
}
} }
type bpPeerStatus struct { //-------------------------------------
peerId string
height uint // blockchain tip of peer
}
type bpRequestTimeout struct { type BlockRequest struct {
peerId string Height uint
height uint PeerId string
} }
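
For orientation (not part of the diff): a minimal consumer sketch of the reworked pull-style BlockPool API above. The consumeBlocks name and the verify callback are illustrative only; the callback stands in for the validator-set check that blockchain/reactor.go performs further down.

package blockchain

import (
    "time"

    "github.com/tendermint/tendermint/types"
)

// consumeBlocks drains the pool in order: PeekTwoBlocks returns the block at
// pool.height plus the following block (whose Validation vouches for it), and
// PopRequest advances the pool once the first block has been verified/applied.
func consumeBlocks(pool *BlockPool, verify func(first, second *types.Block) error) {
    for pool.IsRunning() {
        first, second := pool.PeekTwoBlocks()
        if first == nil || second == nil {
            time.Sleep(100 * time.Millisecond) // nothing complete yet
            continue
        }
        if err := verify(first, second); err != nil {
            pool.RedoRequest(first.Height) // invalid: refetch from another peer
            continue
        }
        pool.PopRequest() // first is applied; the pool moves to the next height
    }
}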


@ -3,6 +3,7 @@ package blockchain
import ( import (
"math/rand" "math/rand"
"testing" "testing"
"time"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
@ -24,26 +25,34 @@ func makePeers(numPeers int, minHeight, maxHeight uint) map[string]testPeer {
} }
func TestBasic(t *testing.T) { func TestBasic(t *testing.T) {
// 100 peers anywhere at height 0 to 1000. peers := makePeers(10, 0, 1000)
peers := makePeers(100, 0, 1000)
start := uint(42) start := uint(42)
maxHeight := uint(300)
timeoutsCh := make(chan string, 100) timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100) requestsCh := make(chan BlockRequest, 100)
blocksCh := make(chan *types.Block, 100) pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool := NewBlockPool(start, timeoutsCh, requestsCh, blocksCh)
pool.Start() pool.Start()
// Introduce each peer. // Introduce each peer.
go func() { go func() {
for _, peer := range peers { for _, peer := range peers {
pool.SetPeerStatus(peer.id, peer.height) pool.SetPeerHeight(peer.id, peer.height)
} }
}() }()
lastSeenBlock := uint(41) // Start a goroutine to pull blocks
go func() {
for {
if !pool.IsRunning() {
return
}
first, second := pool.PeekTwoBlocks()
if first != nil && second != nil {
pool.PopRequest()
} else {
time.Sleep(1 * time.Second)
}
}
}()
// Pull from channels // Pull from channels
for { for {
@ -52,21 +61,15 @@ func TestBasic(t *testing.T) {
t.Errorf("timeout: %v", peerId) t.Errorf("timeout: %v", peerId)
case request := <-requestsCh: case request := <-requestsCh:
log.Debug("TEST: Pulled new BlockRequest", "request", request) log.Debug("TEST: Pulled new BlockRequest", "request", request)
// After a while, pretend like we got a block from the peer. if request.Height == 300 {
return // Done!
}
// Request desired, pretend like we got the block immediately.
go func() { go func() {
block := &types.Block{Header: &types.Header{Height: request.Height}} block := &types.Block{Header: &types.Header{Height: request.Height}}
pool.AddBlock(block, request.PeerId) pool.AddBlock(block, request.PeerId)
log.Debug("TEST: Added block", "block", request.Height, "peer", request.PeerId) log.Debug("TEST: Added block", "block", request.Height, "peer", request.PeerId)
}() }()
case block := <-blocksCh:
log.Debug("TEST: Pulled new Block", "height", block.Height)
if block.Height != lastSeenBlock+1 {
t.Fatalf("Wrong order of blocks seen. Expected: %v Got: %v", lastSeenBlock+1, block.Height)
}
lastSeenBlock++
if block.Height == maxHeight {
return // Done!
}
} }
} }
@ -74,39 +77,52 @@ func TestBasic(t *testing.T) {
} }
func TestTimeout(t *testing.T) { func TestTimeout(t *testing.T) {
peers := makePeers(100, 0, 1000) peers := makePeers(10, 0, 1000)
start := uint(42) start := uint(42)
timeoutsCh := make(chan string, 10) timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 10) requestsCh := make(chan BlockRequest, 100)
blocksCh := make(chan *types.Block, 100) pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool := NewBlockPool(start, timeoutsCh, requestsCh, blocksCh)
pool.Start() pool.Start()
// Introduce each peer. // Introduce each peer.
go func() { go func() {
for _, peer := range peers { for _, peer := range peers {
pool.SetPeerStatus(peer.id, peer.height) pool.SetPeerHeight(peer.id, peer.height)
}
}()
// Start a goroutine to pull blocks
go func() {
for {
if !pool.IsRunning() {
return
}
first, second := pool.PeekTwoBlocks()
if first != nil && second != nil {
pool.PopRequest()
} else {
time.Sleep(1 * time.Second)
}
} }
}() }()
// Pull from channels // Pull from channels
counter := 0
timedOut := map[string]struct{}{}
for { for {
select { select {
case peerId := <-timeoutsCh: case peerId := <-timeoutsCh:
// Timed out. Done! log.Debug("Timeout", "peerId", peerId)
if peers[peerId].id != peerId { if _, ok := timedOut[peerId]; !ok {
t.Errorf("Unexpected peer from timeoutsCh") counter++
if counter == len(peers) {
return // Done!
}
} }
return case request := <-requestsCh:
case _ = <-requestsCh: log.Debug("TEST: Pulled new BlockRequest", "request", request)
// Don't do anything, let it time out.
case _ = <-blocksCh:
t.Errorf("Got block when none expected")
return
} }
} }
pool.Stop() pool.Stop()
} }

blockchain/reactor.go (new file, 304 lines)

@ -0,0 +1,304 @@
package blockchain
import (
"bytes"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/tendermint/tendermint/binary"
. "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
const (
BlockchainChannel = byte(0x40)
defaultChannelCapacity = 100
defaultSleepIntervalMS = 500
trySyncIntervalMS = 100
// stop syncing when last block's time is
// within this much of the system time.
stopSyncingDurationMinutes = 10
)
type stateResetter interface {
ResetToState(*sm.State)
}
// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
sw *p2p.Switch
state *sm.State
store *BlockStore
pool *BlockPool
sync bool
requestsCh chan BlockRequest
timeoutsCh chan string
lastBlock *types.Block
quit chan struct{}
running uint32
}
func NewBlockchainReactor(state *sm.State, store *BlockStore, sync bool) *BlockchainReactor {
if state.LastBlockHeight != store.Height() {
panic(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
}
requestsCh := make(chan BlockRequest, defaultChannelCapacity)
timeoutsCh := make(chan string, defaultChannelCapacity)
pool := NewBlockPool(
store.Height()+1,
requestsCh,
timeoutsCh,
)
bcR := &BlockchainReactor{
state: state,
store: store,
pool: pool,
sync: sync,
requestsCh: requestsCh,
timeoutsCh: timeoutsCh,
quit: make(chan struct{}),
running: uint32(0),
}
return bcR
}
// Implements Reactor
func (bcR *BlockchainReactor) Start(sw *p2p.Switch) {
if atomic.CompareAndSwapUint32(&bcR.running, 0, 1) {
log.Info("Starting BlockchainReactor")
bcR.sw = sw
bcR.pool.Start()
if bcR.sync {
go bcR.poolRoutine()
}
}
}
// Implements Reactor
func (bcR *BlockchainReactor) Stop() {
if atomic.CompareAndSwapUint32(&bcR.running, 1, 0) {
log.Info("Stopping BlockchainReactor")
close(bcR.quit)
bcR.pool.Stop()
}
}
// Implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
Id: BlockchainChannel,
Priority: 5,
SendQueueCapacity: 100,
},
}
}
// Implements Reactor
func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) {
// Send peer our state.
peer.Send(BlockchainChannel, bcPeerStatusMessage{bcR.store.Height()})
}
// Implements Reactor
func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
// Remove peer from the pool.
bcR.pool.RemovePeer(peer.Key)
}
// Implements Reactor
func (bcR *BlockchainReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) {
_, msg_, err := DecodeMessage(msgBytes)
if err != nil {
log.Warn("Error decoding message", "error", err)
return
}
log.Info("Received message", "msg", msg_)
switch msg := msg_.(type) {
case bcBlockRequestMessage:
// Got a request for a block. Respond with block if we have it.
block := bcR.store.LoadBlock(msg.Height)
if block != nil {
msg := bcBlockResponseMessage{Block: block}
queued := src.TrySend(BlockchainChannel, msg)
if !queued {
// queue is full, just ignore.
}
} else {
// TODO peer is asking for things we don't have.
}
case bcBlockResponseMessage:
// Got a block.
bcR.pool.AddBlock(msg.Block, src.Key)
case bcPeerStatusMessage:
// Got a peer status.
bcR.pool.SetPeerHeight(src.Key, msg.Height)
default:
// Ignore unknown message
}
}
// Handle messages from the poolReactor telling the reactor what to do.
func (bcR *BlockchainReactor) poolRoutine() {
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
FOR_LOOP:
for {
select {
case request := <-bcR.requestsCh: // chan BlockRequest
peer := bcR.sw.Peers().Get(request.PeerId)
if peer == nil {
// We can't fulfill the request.
continue FOR_LOOP
}
msg := bcBlockRequestMessage{request.Height}
queued := peer.TrySend(BlockchainChannel, msg)
if !queued {
// We couldn't queue the request.
time.Sleep(defaultSleepIntervalMS * time.Millisecond)
continue FOR_LOOP
}
case peerId := <-bcR.timeoutsCh: // chan string
// Peer timed out.
peer := bcR.sw.Peers().Get(peerId)
if peer != nil {
bcR.sw.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
}
case _ = <-trySyncTicker.C: // chan time
//var lastValidatedBlock *types.Block
SYNC_LOOP:
for i := 0; i < 10; i++ {
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//log.Debug("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
break SYNC_LOOP
}
firstParts := first.MakePartSet()
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's validation.
err := bcR.state.BondedValidators.VerifyValidation(
first.Hash(), firstPartsHeader, first.Height, second.Validation)
if err != nil {
log.Debug("error in validation", "error", err)
bcR.pool.RedoRequest(first.Height)
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
err := sm.ExecBlock(bcR.state, first, firstPartsHeader)
if err != nil {
// TODO This is bad, are we zombie?
panic(Fmt("Failed to process committed block: %v", err))
}
bcR.store.SaveBlock(first, firstParts, second.Validation)
bcR.state.Save()
//lastValidatedBlock = first
}
}
/*
// We're done syncing for now (will do again shortly)
// See if we want to stop syncing and turn on the
// consensus reactor.
// TODO: use other heuristics too besides blocktime.
// It's not a security concern, as it only needs to happen
// upon node sync, and there's also a second (slower)
// method of syncing in the consensus reactor.
if lastValidatedBlock != nil && time.Now().Sub(lastValidatedBlock.Time) < stopSyncingDurationMinutes*time.Minute {
go func() {
log.Info("Stopping blockpool syncing, turning on consensus...")
trySyncTicker.Stop() // Just stop the block requests. Still serve blocks to others.
conR := bcR.sw.Reactor("CONSENSUS")
conR.(stateResetter).ResetToState(bcR.state)
conR.Start(bcR.sw)
for _, peer := range bcR.sw.Peers().List() {
conR.AddPeer(peer)
}
}()
break FOR_LOOP
}
*/
continue FOR_LOOP
case <-bcR.quit:
break FOR_LOOP
}
}
}
func (bcR *BlockchainReactor) BroadcastStatus() error {
bcR.sw.Broadcast(BlockchainChannel, bcPeerStatusMessage{bcR.store.Height()})
return nil
}
//-----------------------------------------------------------------------------
// Messages
const (
msgTypeUnknown = byte(0x00)
msgTypeBlockRequest = byte(0x10)
msgTypeBlockResponse = byte(0x11)
msgTypePeerStatus = byte(0x20)
)
// TODO: check for unnecessary extra bytes at the end.
func DecodeMessage(bz []byte) (msgType byte, msg interface{}, err error) {
n := new(int64)
msgType = bz[0]
r := bytes.NewReader(bz)
switch msgType {
case msgTypeBlockRequest:
msg = binary.ReadBinary(bcBlockRequestMessage{}, r, n, &err)
case msgTypeBlockResponse:
msg = binary.ReadBinary(bcBlockResponseMessage{}, r, n, &err)
case msgTypePeerStatus:
msg = binary.ReadBinary(bcPeerStatusMessage{}, r, n, &err)
default:
msg = nil
}
return
}
//-------------------------------------
type bcBlockRequestMessage struct {
Height uint
}
func (m bcBlockRequestMessage) TypeByte() byte { return msgTypeBlockRequest }
func (m bcBlockRequestMessage) String() string {
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}
//-------------------------------------
type bcBlockResponseMessage struct {
Block *types.Block
}
func (m bcBlockResponseMessage) TypeByte() byte { return msgTypeBlockResponse }
func (m bcBlockResponseMessage) String() string {
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
type bcPeerStatusMessage struct {
Height uint
}
func (m bcPeerStatusMessage) TypeByte() byte { return msgTypePeerStatus }
func (m bcPeerStatusMessage) String() string {
return fmt.Sprintf("[bcPeerStatusMessage %v]", m.Height)
}
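
For orientation (not part of the diff): a round-trip sketch of the wire format DecodeMessage expects. It assumes binary.BinaryBytes writes the TypeByte prefix for message structs that implement TypeByte(), which is the byte DecodeMessage switches on; snippet context within package blockchain, fmt import assumed.

// Encode a request, then decode it back through DecodeMessage.
bz := binary.BinaryBytes(bcBlockRequestMessage{Height: 42})
msgType, msg, err := DecodeMessage(bz)
if err != nil {
    panic(err)
}
fmt.Println(msgType == msgTypeBlockRequest)     // true
fmt.Println(msg.(bcBlockRequestMessage).Height) // 42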


@ -57,7 +57,7 @@ func (bs *BlockStore) LoadBlock(height uint) *types.Block {
if r == nil { if r == nil {
panic(Fmt("Block does not exist at height %v", height)) panic(Fmt("Block does not exist at height %v", height))
} }
meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta) meta := binary.ReadBinary(&types.BlockMeta{}, r, &n, &err).(*types.BlockMeta)
if err != nil { if err != nil {
panic(Fmt("Error reading block meta: %v", err)) panic(Fmt("Error reading block meta: %v", err))
} }
@ -87,14 +87,14 @@ func (bs *BlockStore) LoadBlockPart(height uint, index uint) *types.Part {
return part return part
} }
func (bs *BlockStore) LoadBlockMeta(height uint) *BlockMeta { func (bs *BlockStore) LoadBlockMeta(height uint) *types.BlockMeta {
var n int64 var n int64
var err error var err error
r := bs.GetReader(calcBlockMetaKey(height)) r := bs.GetReader(calcBlockMetaKey(height))
if r == nil { if r == nil {
panic(Fmt("BlockMeta does not exist for height %v", height)) panic(Fmt("BlockMeta does not exist for height %v", height))
} }
meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta) meta := binary.ReadBinary(&types.BlockMeta{}, r, &n, &err).(*types.BlockMeta)
if err != nil { if err != nil {
panic(Fmt("Error reading block meta: %v", err)) panic(Fmt("Error reading block meta: %v", err))
} }
@ -150,7 +150,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
} }
// Save block meta // Save block meta
meta := makeBlockMeta(block, blockParts) meta := types.NewBlockMeta(block, blockParts)
metaBytes := binary.BinaryBytes(meta) metaBytes := binary.BinaryBytes(meta)
bs.db.Set(calcBlockMetaKey(height), metaBytes) bs.db.Set(calcBlockMetaKey(height), metaBytes)
@ -184,22 +184,6 @@ func (bs *BlockStore) saveBlockPart(height uint, index uint, part *types.Part) {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
type BlockMeta struct {
Hash []byte // The block hash
Header *types.Header // The block's Header
Parts types.PartSetHeader // The PartSetHeader, for transfer
}
func makeBlockMeta(block *types.Block, blockParts *types.PartSet) *BlockMeta {
return &BlockMeta{
Hash: block.Hash(),
Header: block.Header,
Parts: blockParts.Header(),
}
}
//-----------------------------------------------------------------------------
func calcBlockMetaKey(height uint) []byte { func calcBlockMetaKey(height uint) []byte {
return []byte(fmt.Sprintf("H:%v", height)) return []byte(fmt.Sprintf("H:%v", height))
} }


@ -1,6 +1,7 @@
package common package common
import ( import (
"encoding/binary"
"sort" "sort"
) )
@ -18,3 +19,13 @@ func SearchUint64s(a []uint64, x uint64) int {
} }
func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) }
//-----------------------------------------------------------------------------
func PutUint64(dest []byte, i uint64) {
binary.LittleEndian.PutUint64(dest, i)
}
func GetUint64(src []byte) uint64 {
return binary.LittleEndian.Uint64(src)
}
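
A quick sketch of the two helpers just added (illustrative snippet, fmt import assumed). Note that binary.LittleEndian.PutUint64 requires a destination of at least 8 bytes.

buf := make([]byte, 8)
PutUint64(buf, 1<<40)
fmt.Println(GetUint64(buf)) // 1099511627776, round-tripped little-endian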


@ -1,44 +1,65 @@
package common package common
import "time" import "time"
import "sync"
/* /*
RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period.
It's good for keeping connections alive. It's good for keeping connections alive.
*/ */
type RepeatTimer struct { type RepeatTimer struct {
Name string Ch chan time.Time
Ch chan struct{}
quit chan struct{} mtx sync.Mutex
dur time.Duration name string
timer *time.Timer ticker *time.Ticker
quit chan struct{}
dur time.Duration
} }
func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer {
var ch = make(chan struct{}) var t = &RepeatTimer{
var quit = make(chan struct{}) Ch: make(chan time.Time),
var t = &RepeatTimer{Name: name, Ch: ch, dur: dur, quit: quit} ticker: time.NewTicker(dur),
t.timer = time.AfterFunc(dur, t.fireRoutine) quit: make(chan struct{}),
name: name,
dur: dur,
}
go t.fireRoutine(t.ticker)
return t return t
} }
func (t *RepeatTimer) fireRoutine() { func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) {
select { for {
case t.Ch <- struct{}{}: select {
t.timer.Reset(t.dur) case t_ := <-ticker.C:
case <-t.quit: t.Ch <- t_
// do nothing case <-t.quit:
default: return
t.timer.Reset(t.dur) }
} }
} }
// Wait the duration again before firing. // Wait the duration again before firing.
func (t *RepeatTimer) Reset() { func (t *RepeatTimer) Reset() {
t.timer.Reset(t.dur) t.mtx.Lock() // Lock
defer t.mtx.Unlock()
if t.ticker != nil {
t.ticker.Stop()
}
t.ticker = time.NewTicker(t.dur)
go t.fireRoutine(t.ticker)
} }
func (t *RepeatTimer) Stop() bool { func (t *RepeatTimer) Stop() bool {
close(t.quit) t.mtx.Lock() // Lock
return t.timer.Stop() defer t.mtx.Unlock()
exists := t.ticker != nil
if exists {
t.ticker.Stop()
t.ticker = nil
}
return exists
} }
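
For orientation (not part of the diff): a usage sketch of the reworked RepeatTimer. The channel now carries time.Time ticks from a time.Ticker rather than struct{}{}; demoRepeatTimer is an illustrative name.

package common

import (
    "fmt"
    "time"
)

func demoRepeatTimer() {
    t := NewRepeatTimer("keepalive", 2*time.Second)
    go func() {
        for tick := range t.Ch { // one tick per period while the timer runs
            fmt.Println("keepalive at", tick)
        }
    }()
    time.Sleep(5 * time.Second)
    t.Reset() // restart the interval: the next tick is a full period from now
    t.Stop()  // stops the underlying ticker; t.Ch stays open
}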

common/word.go (new file, 78 lines)

@ -0,0 +1,78 @@
package common
import (
"bytes"
"encoding/binary"
"sort"
)
var (
Zero256 = Word256{0}
One256 = Word256{1}
)
type Word256 [32]byte
func (w Word256) String() string { return string(w[:]) }
func (w Word256) Copy() Word256 { return w }
func (w Word256) Bytes() []byte { return w[:] } // copied.
func (w Word256) Prefix(n int) []byte { return w[:n] }
func (w Word256) IsZero() bool {
accum := byte(0)
for _, byt := range w {
accum |= byt
}
return accum == 0
}
func (w Word256) Compare(other Word256) int {
return bytes.Compare(w[:], other[:])
}
func Uint64ToWord256(i uint64) Word256 {
word := Word256{}
PutUint64(word[:], i)
return word
}
func RightPadWord256(bz []byte) (word Word256) {
copy(word[:], bz)
return
}
func LeftPadWord256(bz []byte) (word Word256) {
copy(word[32-len(bz):], bz)
return
}
func Uint64FromWord256(word Word256) uint64 {
return binary.LittleEndian.Uint64(word[:])
}
//-------------------------------------
type Tuple256 struct {
First Word256
Second Word256
}
func (tuple Tuple256) Compare(other Tuple256) int {
firstCompare := tuple.First.Compare(other.First)
if firstCompare == 0 {
return tuple.Second.Compare(other.Second)
} else {
return firstCompare
}
}
func Tuple256Split(t Tuple256) (Word256, Word256) {
return t.First, t.Second
}
type Tuple256Slice []Tuple256
func (p Tuple256Slice) Len() int { return len(p) }
func (p Tuple256Slice) Less(i, j int) bool {
return p[i].Compare(p[j]) < 0
}
func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Tuple256Slice) Sort() { sort.Sort(p) }
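
For orientation (not part of the diff): a small sketch of the Word256 helpers above (snippet form, fmt import assumed; behavior follows directly from the code in this file).

w := Uint64ToWord256(7)             // 7 stored little-endian in the first 8 bytes
fmt.Println(Uint64FromWord256(w))   // 7
fmt.Println(w.IsZero())             // false

a := RightPadWord256([]byte("abc")) // "abc" at the front, zero bytes after
b := LeftPadWord256([]byte("abc"))  // zero bytes first, "abc" in the last 3 bytes
fmt.Println(a.Compare(b) > 0)       // true: a's first byte is nonzero, b's is zero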


@ -104,6 +104,8 @@ func initDefaults(rootDir string) {
app.SetDefault("GenesisFile", rootDir+"/genesis.json") app.SetDefault("GenesisFile", rootDir+"/genesis.json")
app.SetDefault("AddrBookFile", rootDir+"/addrbook.json") app.SetDefault("AddrBookFile", rootDir+"/addrbook.json")
app.SetDefault("PrivValidatorfile", rootDir+"/priv_validator.json") app.SetDefault("PrivValidatorfile", rootDir+"/priv_validator.json")
app.SetDefault("FastSync", false)
} }
func Init(rootDir string) { func Init(rootDir string) {
@ -161,6 +163,7 @@ func ParseFlags(args []string) {
flags.BoolVar(&printHelp, "help", false, "Print this help message.") flags.BoolVar(&printHelp, "help", false, "Print this help message.")
flags.String("listen_addr", app.GetString("ListenAddr"), "Listen address. (0.0.0.0:0 means any interface, any port)") flags.String("listen_addr", app.GetString("ListenAddr"), "Listen address. (0.0.0.0:0 means any interface, any port)")
flags.String("seed_node", app.GetString("SeedNode"), "Address of seed node") flags.String("seed_node", app.GetString("SeedNode"), "Address of seed node")
flags.Bool("fast_sync", app.GetBool("FastSync"), "Fast blockchain syncing")
flags.String("rpc_http_listen_addr", app.GetString("RPC.HTTP.ListenAddr"), "RPC listen address. Port required") flags.String("rpc_http_listen_addr", app.GetString("RPC.HTTP.ListenAddr"), "RPC listen address. Port required")
flags.Parse(args) flags.Parse(args)
if printHelp { if printHelp {
@ -171,6 +174,7 @@ func ParseFlags(args []string) {
// Merge parsed flag values onto app. // Merge parsed flag values onto app.
app.BindPFlag("ListenAddr", flags.Lookup("listen_addr")) app.BindPFlag("ListenAddr", flags.Lookup("listen_addr"))
app.BindPFlag("SeedNode", flags.Lookup("seed_node")) app.BindPFlag("SeedNode", flags.Lookup("seed_node"))
app.BindPFlag("FastSync", flags.Lookup("fast_sync"))
app.BindPFlag("RPC.HTTP.ListenAddr", flags.Lookup("rpc_http_listen_addr")) app.BindPFlag("RPC.HTTP.ListenAddr", flags.Lookup("rpc_http_listen_addr"))
// Confused? // Confused?


@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"github.com/tendermint/tendermint/account" "github.com/tendermint/tendermint/account"
"github.com/tendermint/tendermint/binary"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
@ -94,3 +95,7 @@ func (pol *POL) StringShort() string {
Fingerprint(pol.BlockHash), pol.BlockParts) Fingerprint(pol.BlockHash), pol.BlockParts)
} }
} }
func (pol *POL) MakePartSet() *types.PartSet {
return types.NewPartSetFromData(binary.BinaryBytes(pol))
}


@ -9,6 +9,7 @@ import (
"time" "time"
"github.com/tendermint/tendermint/binary" "github.com/tendermint/tendermint/binary"
bc "github.com/tendermint/tendermint/blockchain"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
. "github.com/tendermint/tendermint/consensus/types" . "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
@ -17,9 +18,9 @@ import (
) )
const ( const (
StateCh = byte(0x20) StateChannel = byte(0x20)
DataCh = byte(0x21) DataChannel = byte(0x21)
VoteCh = byte(0x22) VoteChannel = byte(0x22)
peerStateKey = "ConsensusReactor.peerState" peerStateKey = "ConsensusReactor.peerState"
@ -28,17 +29,18 @@ const (
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// The reactor's underlying ConsensusState may change state at any time.
// We atomically copy the RoundState struct before using it.
type ConsensusReactor struct { type ConsensusReactor struct {
sw *p2p.Switch sw *p2p.Switch
started uint32 running uint32
stopped uint32
quit chan struct{} quit chan struct{}
blockStore *types.BlockStore blockStore *bc.BlockStore
conS *ConsensusState conS *ConsensusState
} }
func NewConsensusReactor(consensusState *ConsensusState, blockStore *types.BlockStore) *ConsensusReactor { func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockStore) *ConsensusReactor {
conR := &ConsensusReactor{ conR := &ConsensusReactor{
blockStore: blockStore, blockStore: blockStore,
quit: make(chan struct{}), quit: make(chan struct{}),
@ -49,7 +51,7 @@ func NewConsensusReactor(consensusState *ConsensusState, blockStore *types.Block
// Implements Reactor // Implements Reactor
func (conR *ConsensusReactor) Start(sw *p2p.Switch) { func (conR *ConsensusReactor) Start(sw *p2p.Switch) {
if atomic.CompareAndSwapUint32(&conR.started, 0, 1) { if atomic.CompareAndSwapUint32(&conR.running, 0, 1) {
log.Info("Starting ConsensusReactor") log.Info("Starting ConsensusReactor")
conR.sw = sw conR.sw = sw
conR.conS.Start() conR.conS.Start()
@ -59,15 +61,15 @@ func (conR *ConsensusReactor) Start(sw *p2p.Switch) {
// Implements Reactor // Implements Reactor
func (conR *ConsensusReactor) Stop() { func (conR *ConsensusReactor) Stop() {
if atomic.CompareAndSwapUint32(&conR.stopped, 0, 1) { if atomic.CompareAndSwapUint32(&conR.running, 1, 0) {
log.Info("Stopping ConsensusReactor") log.Info("Stopping ConsensusReactor")
conR.conS.Stop() conR.conS.Stop()
close(conR.quit) close(conR.quit)
} }
} }
func (conR *ConsensusReactor) IsStopped() bool { func (conR *ConsensusReactor) IsRunning() bool {
return atomic.LoadUint32(&conR.stopped) == 1 return atomic.LoadUint32(&conR.running) == 1
} }
// Implements Reactor // Implements Reactor
@ -75,15 +77,15 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
// TODO optimize // TODO optimize
return []*p2p.ChannelDescriptor{ return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{
Id: StateCh, Id: StateChannel,
Priority: 5, Priority: 5,
}, },
&p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{
Id: DataCh, Id: DataChannel,
Priority: 5, Priority: 5,
}, },
&p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{
Id: VoteCh, Id: VoteChannel,
Priority: 5, Priority: 5,
}, },
} }
@ -91,6 +93,10 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
// Implements Reactor // Implements Reactor
func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) { func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
if !conR.IsRunning() {
return
}
// Create peerState for peer // Create peerState for peer
peerState := NewPeerState(peer) peerState := NewPeerState(peer)
peer.Data.Set(peerStateKey, peerState) peer.Data.Set(peerStateKey, peerState)
@ -105,11 +111,18 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
// Implements Reactor // Implements Reactor
func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) { func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
if !conR.IsRunning() {
return
}
//peer.Data.Get(peerStateKey).(*PeerState).Disconnect() //peer.Data.Get(peerStateKey).(*PeerState).Disconnect()
} }
// Implements Reactor // Implements Reactor
func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte) { func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
return
}
// Get round state // Get round state
rs := conR.conS.GetRoundState() rs := conR.conS.GetRoundState()
@ -122,7 +135,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte
log.Debug("Receive", "channel", chId, "peer", peer, "msg", msg_, "bytes", msgBytes) log.Debug("Receive", "channel", chId, "peer", peer, "msg", msg_, "bytes", msgBytes)
switch chId { switch chId {
case StateCh: case StateChannel:
switch msg := msg_.(type) { switch msg := msg_.(type) {
case *NewRoundStepMessage: case *NewRoundStepMessage:
ps.ApplyNewRoundStepMessage(msg, rs) ps.ApplyNewRoundStepMessage(msg, rs)
@ -134,7 +147,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte
// Ignore unknown message // Ignore unknown message
} }
case DataCh: case DataChannel:
switch msg := msg_.(type) { switch msg := msg_.(type) {
case *Proposal: case *Proposal:
ps.SetHasProposal(msg) ps.SetHasProposal(msg)
@ -155,7 +168,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte
// Ignore unknown message // Ignore unknown message
} }
case VoteCh: case VoteChannel:
switch msg := msg_.(type) { switch msg := msg_.(type) {
case *VoteMessage: case *VoteMessage:
vote := msg.Vote vote := msg.Vote
@ -192,7 +205,7 @@ func (conR *ConsensusReactor) Receive(chId byte, peer *p2p.Peer, msgBytes []byte
Type: vote.Type, Type: vote.Type,
Index: index, Index: index,
} }
conR.sw.Broadcast(StateCh, msg) conR.sw.Broadcast(StateChannel, msg)
} }
default: default:
@ -212,6 +225,11 @@ func (conR *ConsensusReactor) SetPrivValidator(priv *sm.PrivValidator) {
conR.conS.SetPrivValidator(priv) conR.conS.SetPrivValidator(priv)
} }
// Reset to some state.
func (conR *ConsensusReactor) ResetToState(state *sm.State) {
conR.conS.updateToState(state, false)
}
//-------------------------------------- //--------------------------------------
func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) { func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) {
@ -252,10 +270,10 @@ func (conR *ConsensusReactor) broadcastNewRoundStepRoutine() {
nrsMsg, csMsg := makeRoundStepMessages(rs) nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil { if nrsMsg != nil {
conR.sw.Broadcast(StateCh, nrsMsg) conR.sw.Broadcast(StateChannel, nrsMsg)
} }
if csMsg != nil { if csMsg != nil {
conR.sw.Broadcast(StateCh, csMsg) conR.sw.Broadcast(StateChannel, csMsg)
} }
} }
} }
@ -264,10 +282,10 @@ func (conR *ConsensusReactor) sendNewRoundStepRoutine(peer *p2p.Peer) {
rs := conR.conS.GetRoundState() rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs) nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil { if nrsMsg != nil {
peer.Send(StateCh, nrsMsg) peer.Send(StateChannel, nrsMsg)
} }
if csMsg != nil { if csMsg != nil {
peer.Send(StateCh, nrsMsg) peer.Send(StateChannel, nrsMsg)
} }
} }
@ -276,7 +294,7 @@ func (conR *ConsensusReactor) gossipDataRoutine(peer *p2p.Peer, ps *PeerState) {
OUTER_LOOP: OUTER_LOOP:
for { for {
// Manage disconnects from self or peer. // Manage disconnects from self or peer.
if peer.IsStopped() || conR.IsStopped() { if !peer.IsRunning() || !conR.IsRunning() {
log.Info(Fmt("Stopping gossipDataRoutine for %v.", peer)) log.Info(Fmt("Stopping gossipDataRoutine for %v.", peer))
return return
} }
@ -296,7 +314,7 @@ OUTER_LOOP:
Type: partTypeProposalBlock, Type: partTypeProposalBlock,
Part: part, Part: part,
} }
peer.Send(DataCh, msg) peer.Send(DataChannel, msg)
ps.SetHasProposalBlockPart(rs.Height, rs.Round, index) ps.SetHasProposalBlockPart(rs.Height, rs.Round, index)
continue OUTER_LOOP continue OUTER_LOOP
} }
@ -306,7 +324,7 @@ OUTER_LOOP:
if 0 < prs.Height && prs.Height < rs.Height { if 0 < prs.Height && prs.Height < rs.Height {
//log.Debug("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockBitArray", prs.ProposalBlockBitArray) //log.Debug("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockBitArray", prs.ProposalBlockBitArray)
if index, ok := prs.ProposalBlockBitArray.Not().PickRandom(); ok { if index, ok := prs.ProposalBlockBitArray.Not().PickRandom(); ok {
// Ensure that the peer's PartSetHeaeder is correct // Ensure that the peer's PartSetHeader is correct
blockMeta := conR.blockStore.LoadBlockMeta(prs.Height) blockMeta := conR.blockStore.LoadBlockMeta(prs.Height)
if !blockMeta.Parts.Equals(prs.ProposalBlockParts) { if !blockMeta.Parts.Equals(prs.ProposalBlockParts) {
log.Debug("Peer ProposalBlockParts mismatch, sleeping", log.Debug("Peer ProposalBlockParts mismatch, sleeping",
@ -329,7 +347,7 @@ OUTER_LOOP:
Type: partTypeProposalBlock, Type: partTypeProposalBlock,
Part: part, Part: part,
} }
peer.Send(DataCh, msg) peer.Send(DataChannel, msg)
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
continue OUTER_LOOP continue OUTER_LOOP
} else { } else {
@ -349,7 +367,7 @@ OUTER_LOOP:
// Send proposal? // Send proposal?
if rs.Proposal != nil && !prs.Proposal { if rs.Proposal != nil && !prs.Proposal {
msg := p2p.TypedMessage{msgTypeProposal, rs.Proposal} msg := p2p.TypedMessage{msgTypeProposal, rs.Proposal}
peer.Send(DataCh, msg) peer.Send(DataChannel, msg)
ps.SetHasProposal(rs.Proposal) ps.SetHasProposal(rs.Proposal)
continue OUTER_LOOP continue OUTER_LOOP
} }
@ -363,7 +381,7 @@ OUTER_LOOP:
Type: partTypeProposalPOL, Type: partTypeProposalPOL,
Part: rs.ProposalPOLParts.GetPart(index), Part: rs.ProposalPOLParts.GetPart(index),
} }
peer.Send(DataCh, msg) peer.Send(DataChannel, msg)
ps.SetHasProposalPOLPart(rs.Height, rs.Round, index) ps.SetHasProposalPOLPart(rs.Height, rs.Round, index)
continue OUTER_LOOP continue OUTER_LOOP
} }
@ -379,7 +397,7 @@ func (conR *ConsensusReactor) gossipVotesRoutine(peer *p2p.Peer, ps *PeerState)
OUTER_LOOP: OUTER_LOOP:
for { for {
// Manage disconnects from self or peer. // Manage disconnects from self or peer.
if peer.IsStopped() || conR.IsStopped() { if !peer.IsRunning() || !conR.IsRunning() {
log.Info(Fmt("Stopping gossipVotesRoutine for %v.", peer)) log.Info(Fmt("Stopping gossipVotesRoutine for %v.", peer))
return return
} }
@ -397,7 +415,7 @@ OUTER_LOOP:
vote := voteSet.GetByIndex(index) vote := voteSet.GetByIndex(index)
// NOTE: vote may be a commit. // NOTE: vote may be a commit.
msg := &VoteMessage{index, vote} msg := &VoteMessage{index, vote}
peer.Send(VoteCh, msg) peer.Send(VoteChannel, msg)
ps.SetHasVote(vote, index) ps.SetHasVote(vote, index)
return true return true
} }
@ -421,7 +439,7 @@ OUTER_LOOP:
Signature: commit.Signature, Signature: commit.Signature,
} }
msg := &VoteMessage{index, vote} msg := &VoteMessage{index, vote}
peer.Send(VoteCh, msg) peer.Send(VoteChannel, msg)
ps.SetHasVote(vote, index) ps.SetHasVote(vote, index)
return true return true
} }


@ -62,6 +62,7 @@ import (
"github.com/tendermint/tendermint/account" "github.com/tendermint/tendermint/account"
"github.com/tendermint/tendermint/binary" "github.com/tendermint/tendermint/binary"
bc "github.com/tendermint/tendermint/blockchain"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/config"
. "github.com/tendermint/tendermint/consensus/types" . "github.com/tendermint/tendermint/consensus/types"
@ -234,7 +235,7 @@ type ConsensusState struct {
stopped uint32 stopped uint32
quit chan struct{} quit chan struct{}
blockStore *types.BlockStore blockStore *bc.BlockStore
mempoolReactor *mempl.MempoolReactor mempoolReactor *mempl.MempoolReactor
runActionCh chan RoundAction runActionCh chan RoundAction
newStepCh chan *RoundState newStepCh chan *RoundState
@ -247,7 +248,7 @@ type ConsensusState struct {
lastCommitVoteHeight uint // Last called commitVoteBlock() or saveCommitVoteBlock() on. lastCommitVoteHeight uint // Last called commitVoteBlock() or saveCommitVoteBlock() on.
} }
func NewConsensusState(state *sm.State, blockStore *types.BlockStore, mempoolReactor *mempl.MempoolReactor) *ConsensusState { func NewConsensusState(state *sm.State, blockStore *bc.BlockStore, mempoolReactor *mempl.MempoolReactor) *ConsensusState {
cs := &ConsensusState{ cs := &ConsensusState{
quit: make(chan struct{}), quit: make(chan struct{}),
blockStore: blockStore, blockStore: blockStore,
@ -255,7 +256,7 @@ func NewConsensusState(state *sm.State, blockStore *types.BlockStore, mempoolRea
runActionCh: make(chan RoundAction, 1), runActionCh: make(chan RoundAction, 1),
newStepCh: make(chan *RoundState, 1), newStepCh: make(chan *RoundState, 1),
} }
cs.updateToState(state) cs.updateToState(state, true)
return cs return cs
} }
@ -456,9 +457,9 @@ ACTION_LOOP:
// If calculated round is greater than 0 (based on BlockTime or calculated StartTime) // If calculated round is greater than 0 (based on BlockTime or calculated StartTime)
// then also sets up the appropriate round, and cs.Step becomes RoundStepNewRound. // then also sets up the appropriate round, and cs.Step becomes RoundStepNewRound.
// Otherwise the round is 0 and cs.Step becomes RoundStepNewHeight. // Otherwise the round is 0 and cs.Step becomes RoundStepNewHeight.
func (cs *ConsensusState) updateToState(state *sm.State) { func (cs *ConsensusState) updateToState(state *sm.State, contiguous bool) {
// Sanity check state. // Sanity check state.
if cs.Height > 0 && cs.Height != state.LastBlockHeight { if contiguous && cs.Height > 0 && cs.Height != state.LastBlockHeight {
panic(Fmt("updateToState() expected state height of %v but found %v", panic(Fmt("updateToState() expected state height of %v but found %v",
cs.Height, state.LastBlockHeight)) cs.Height, state.LastBlockHeight))
} }
@ -466,6 +467,8 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
// Reset fields based on state. // Reset fields based on state.
validators := state.BondedValidators validators := state.BondedValidators
height := state.LastBlockHeight + 1 // next desired block height height := state.LastBlockHeight + 1 // next desired block height
// RoundState fields
cs.Height = height cs.Height = height
cs.Round = 0 cs.Round = 0
cs.Step = RoundStepNewHeight cs.Step = RoundStepNewHeight
@ -641,12 +644,12 @@ func (cs *ConsensusState) RunActionPropose(height uint, round uint) {
return return
} }
blockParts = types.NewPartSetFromData(binary.BinaryBytes(block)) blockParts = block.MakePartSet()
pol = cs.LockedPOL // If exists, is a PoUnlock. pol = cs.LockedPOL // If exists, is a PoUnlock.
} }
if pol != nil { if pol != nil {
polParts = types.NewPartSetFromData(binary.BinaryBytes(pol)) polParts = pol.MakePartSet()
} }
// Make proposal // Make proposal
@ -856,7 +859,7 @@ func (cs *ConsensusState) TryFinalizeCommit(height uint) bool {
// We have the block, so save/stage/sign-commit-vote. // We have the block, so save/stage/sign-commit-vote.
cs.saveCommitVoteBlock(cs.ProposalBlock, cs.ProposalBlockParts, cs.Commits) cs.saveCommitVoteBlock(cs.ProposalBlock, cs.ProposalBlockParts, cs.Commits)
// Increment height. // Increment height.
cs.updateToState(cs.stagedState) cs.updateToState(cs.stagedState, true)
// cs.Step is now RoundStepNewHeight or RoundStepNewRound // cs.Step is now RoundStepNewHeight or RoundStepNewRound
cs.newStepCh <- cs.getRoundState() cs.newStepCh <- cs.getRoundState()
return true return true
@ -1012,7 +1015,8 @@ func (cs *ConsensusState) stageBlock(block *types.Block, blockParts *types.PartS
} }
// Already staged? // Already staged?
if cs.stagedBlock == block { blockHash := block.Hash()
if cs.stagedBlock != nil && len(blockHash) != 0 && bytes.Equal(cs.stagedBlock.Hash(), blockHash) {
return nil return nil
} }
@ -1021,7 +1025,7 @@ func (cs *ConsensusState) stageBlock(block *types.Block, blockParts *types.PartS
// Commit block onto the copied state. // Commit block onto the copied state.
// NOTE: Basic validation is done in state.AppendBlock(). // NOTE: Basic validation is done in state.AppendBlock().
err := stateCopy.AppendBlock(block, blockParts.Header()) err := sm.ExecBlock(stateCopy, block, blockParts.Header())
if err != nil { if err != nil {
return err return err
} else { } else {

View File

@ -3,15 +3,15 @@ package consensus
import ( import (
"sort" "sort"
bc "github.com/tendermint/tendermint/blockchain"
dbm "github.com/tendermint/tendermint/db" dbm "github.com/tendermint/tendermint/db"
mempl "github.com/tendermint/tendermint/mempool" mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
) )
func randConsensusState() (*ConsensusState, []*sm.PrivValidator) { func randConsensusState() (*ConsensusState, []*sm.PrivValidator) {
state, _, privValidators := sm.RandGenesisState(20, false, 1000, 10, false, 1000) state, _, privValidators := sm.RandGenesisState(20, false, 1000, 10, false, 1000)
blockStore := types.NewBlockStore(dbm.NewMemDB()) blockStore := bc.NewBlockStore(dbm.NewMemDB())
mempool := mempl.NewMempool(state) mempool := mempl.NewMempool(state)
mempoolReactor := mempl.NewMempoolReactor(mempool) mempoolReactor := mempl.NewMempoolReactor(mempool)
cs := NewConsensusState(state, blockStore, mempoolReactor) cs := NewConsensusState(state, blockStore, mempoolReactor)

View File

@ -34,7 +34,7 @@ type VoteSet struct {
maj23Exists bool maj23Exists bool
} }
// Constructs a new VoteSet struct used to accumulate votes for each round. // Constructs a new VoteSet struct used to accumulate votes for given height/round.
func NewVoteSet(height uint, round uint, type_ byte, valSet *sm.ValidatorSet) *VoteSet { func NewVoteSet(height uint, round uint, type_ byte, valSet *sm.ValidatorSet) *VoteSet {
if height == 0 { if height == 0 {
panic("Cannot make VoteSet for height == 0, doesn't make sense.") panic("Cannot make VoteSet for height == 0, doesn't make sense.")

View File

@ -4,6 +4,7 @@ import (
"os" "os"
"os/signal" "os/signal"
bc "github.com/tendermint/tendermint/blockchain"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/consensus"
@ -13,15 +14,15 @@ import (
"github.com/tendermint/tendermint/rpc" "github.com/tendermint/tendermint/rpc"
"github.com/tendermint/tendermint/rpc/core" "github.com/tendermint/tendermint/rpc/core"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
) )
type Node struct { type Node struct {
lz []p2p.Listener lz []p2p.Listener
sw *p2p.Switch sw *p2p.Switch
book *p2p.AddrBook book *p2p.AddrBook
blockStore *bc.BlockStore
pexReactor *p2p.PEXReactor pexReactor *p2p.PEXReactor
blockStore *types.BlockStore bcReactor *bc.BlockchainReactor
mempoolReactor *mempl.MempoolReactor mempoolReactor *mempl.MempoolReactor
consensusState *consensus.ConsensusState consensusState *consensus.ConsensusState
consensusReactor *consensus.ConsensusReactor consensusReactor *consensus.ConsensusReactor
@ -31,7 +32,7 @@ type Node struct {
func NewNode() *Node { func NewNode() *Node {
// Get BlockStore // Get BlockStore
blockStoreDB := dbm.GetDB("blockstore") blockStoreDB := dbm.GetDB("blockstore")
blockStore := types.NewBlockStore(blockStoreDB) blockStore := bc.NewBlockStore(blockStoreDB)
// Get State // Get State
stateDB := dbm.GetDB("state") stateDB := dbm.GetDB("state")
@ -54,6 +55,9 @@ func NewNode() *Node {
book := p2p.NewAddrBook(config.App().GetString("AddrBookFile")) book := p2p.NewAddrBook(config.App().GetString("AddrBookFile"))
pexReactor := p2p.NewPEXReactor(book) pexReactor := p2p.NewPEXReactor(book)
// Get BlockchainReactor
bcReactor := bc.NewBlockchainReactor(state, blockStore, config.App().GetBool("FastSync"))
// Get MempoolReactor // Get MempoolReactor
mempool := mempl.NewMempool(state.Copy()) mempool := mempl.NewMempool(state.Copy())
mempoolReactor := mempl.NewMempoolReactor(mempool) mempoolReactor := mempl.NewMempoolReactor(mempool)
@ -65,14 +69,23 @@ func NewNode() *Node {
consensusReactor.SetPrivValidator(privValidator) consensusReactor.SetPrivValidator(privValidator)
} }
sw := p2p.NewSwitch([]p2p.Reactor{pexReactor, mempoolReactor, consensusReactor}) sw := p2p.NewSwitch()
sw.SetChainId(state.Hash(), config.App().GetString("Network")) sw.SetNetwork(config.App().GetString("Network"))
sw.AddReactor("PEX", pexReactor).Start(sw)
sw.AddReactor("MEMPOOL", mempoolReactor).Start(sw)
sw.AddReactor("BLOCKCHAIN", bcReactor).Start(sw)
if !config.App().GetBool("FastSync") {
sw.AddReactor("CONSENSUS", consensusReactor).Start(sw)
} else {
sw.AddReactor("CONSENSUS", consensusReactor)
}
return &Node{ return &Node{
sw: sw, sw: sw,
book: book, book: book,
pexReactor: pexReactor,
blockStore: blockStore, blockStore: blockStore,
pexReactor: pexReactor,
bcReactor: bcReactor,
mempoolReactor: mempoolReactor, mempoolReactor: mempoolReactor,
consensusState: consensusState, consensusState: consensusState,
consensusReactor: consensusReactor, consensusReactor: consensusReactor,
@ -86,7 +99,7 @@ func (n *Node) Start() {
go n.inboundConnectionRoutine(l) go n.inboundConnectionRoutine(l)
} }
n.book.Start() n.book.Start()
n.sw.Start() //n.sw.StartReactors()
} }
func (n *Node) Stop() { func (n *Node) Stop() {

View File

@ -19,12 +19,14 @@ import (
type Mempool struct { type Mempool struct {
mtx sync.Mutex mtx sync.Mutex
state *sm.State state *sm.State
cache *sm.BlockCache
txs []types.Tx txs []types.Tx
} }
func NewMempool(state *sm.State) *Mempool { func NewMempool(state *sm.State) *Mempool {
return &Mempool{ return &Mempool{
state: state, state: state,
cache: sm.NewBlockCache(state),
} }
} }
@ -36,7 +38,7 @@ func (mem *Mempool) GetState() *sm.State {
func (mem *Mempool) AddTx(tx types.Tx) (err error) { func (mem *Mempool) AddTx(tx types.Tx) (err error) {
mem.mtx.Lock() mem.mtx.Lock()
defer mem.mtx.Unlock() defer mem.mtx.Unlock()
err = mem.state.ExecTx(tx, false) err = sm.ExecTx(mem.cache, tx, false)
if err != nil { if err != nil {
log.Debug("AddTx() error", "tx", tx, "error", err) log.Debug("AddTx() error", "tx", tx, "error", err)
return err return err
@ -62,6 +64,7 @@ func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) {
mem.mtx.Lock() mem.mtx.Lock()
defer mem.mtx.Unlock() defer mem.mtx.Unlock()
mem.state = state.Copy() mem.state = state.Copy()
mem.cache = sm.NewBlockCache(mem.state)
// First, create a lookup map of txns in new block. // First, create a lookup map of txns in new block.
blockTxsMap := make(map[string]struct{}) blockTxsMap := make(map[string]struct{})
@ -86,7 +89,7 @@ func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) {
// Next, filter all txs that aren't valid given new state. // Next, filter all txs that aren't valid given new state.
validTxs := []types.Tx{} validTxs := []types.Tx{}
for _, tx := range txs { for _, tx := range txs {
err := mem.state.ExecTx(tx, false) err := sm.ExecTx(mem.cache, tx, false)
if err == nil { if err == nil {
log.Debug("Filter in, valid", "tx", tx) log.Debug("Filter in, valid", "tx", tx)
validTxs = append(validTxs, tx) validTxs = append(validTxs, tx)

View File

@ -11,7 +11,7 @@ import (
) )
var ( var (
MempoolCh = byte(0x30) MempoolChannel = byte(0x30)
) )
// MempoolReactor handles mempool tx broadcasting amongst peers. // MempoolReactor handles mempool tx broadcasting amongst peers.
@ -52,7 +52,7 @@ func (memR *MempoolReactor) Stop() {
func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{ return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{
Id: MempoolCh, Id: MempoolChannel,
Priority: 5, Priority: 5,
}, },
} }
@ -92,7 +92,7 @@ func (memR *MempoolReactor) Receive(chId byte, src *p2p.Peer, msgBytes []byte) {
if peer.Key == src.Key { if peer.Key == src.Key {
continue continue
} }
peer.TrySend(MempoolCh, msg) peer.TrySend(MempoolChannel, msg)
} }
default: default:
@ -106,7 +106,7 @@ func (memR *MempoolReactor) BroadcastTx(tx types.Tx) error {
return err return err
} }
msg := &TxMessage{Tx: tx} msg := &TxMessage{Tx: tx}
memR.sw.Broadcast(MempoolCh, msg) memR.sw.Broadcast(MempoolChannel, msg)
return nil return nil
} }

View File

@ -381,7 +381,7 @@ out:
for { for {
select { select {
case <-dumpAddressTicker.C: case <-dumpAddressTicker.C:
log.Debug("Saving book to file", "size", a.Size()) log.Debug("Saving AddrBook to file", "size", a.Size())
a.saveToFile(a.filePath) a.saveToFile(a.filePath)
case <-a.quit: case <-a.quit:
break out break out

View File

@ -50,8 +50,9 @@ There are two methods for sending messages:
func (m MConnection) TrySend(chId byte, msg interface{}) bool {} func (m MConnection) TrySend(chId byte, msg interface{}) bool {}
`Send(chId, msg)` is a blocking call that waits until `msg` is successfully queued `Send(chId, msg)` is a blocking call that waits until `msg` is successfully queued
for the channel with the given id byte `chId`. The message `msg` is serialized for the channel with the given id byte `chId`, or until the request times out.
using the `tendermint/binary` submodule's `WriteBinary()` reflection routine. The message `msg` is serialized using the `tendermint/binary` submodule's
`WriteBinary()` reflection routine.
`TrySend(chId, msg)` is a nonblocking call that returns false if the channel's `TrySend(chId, msg)` is a nonblocking call that returns false if the channel's
queue is full. queue is full.
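A minimal usage sketch of the two send paths described above (not part of this diff; the peer `p` and the channel id are illustrative):
// p is a connected *Peer; chId is a channel registered by some reactor.
chId := byte(0x20)
// Send blocks until the message is queued for chId (or the attempt times out)
// and reports the outcome as a bool.
if !p.Send(chId, "hello") {
    log.Debug("Send failed or timed out")
}
// TrySend returns immediately with false if chId's send queue is full.
if !p.TrySend(chId, "hello") {
    log.Debug("Send queue full, message dropped")
}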
@ -416,6 +417,7 @@ FOR_LOOP:
} }
msgBytes := channel.recvMsgPacket(pkt) msgBytes := channel.recvMsgPacket(pkt)
if msgBytes != nil { if msgBytes != nil {
log.Debug("Received bytes", "chId", pkt.ChannelId, "msgBytes", msgBytes)
c.onReceive(pkt.ChannelId, msgBytes) c.onReceive(pkt.ChannelId, msgBytes)
} }
default: default:
@ -437,8 +439,19 @@ FOR_LOOP:
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
type ChannelDescriptor struct { type ChannelDescriptor struct {
Id byte Id byte
Priority uint Priority uint
SendQueueCapacity uint
RecvBufferCapacity uint
}
func (chDesc *ChannelDescriptor) FillDefaults() {
if chDesc.SendQueueCapacity == 0 {
chDesc.SendQueueCapacity = defaultSendQueueCapacity
}
if chDesc.RecvBufferCapacity == 0 {
chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
}
} }
// TODO: lowercase. // TODO: lowercase.
@ -448,7 +461,7 @@ type Channel struct {
desc *ChannelDescriptor desc *ChannelDescriptor
id byte id byte
sendQueue chan []byte sendQueue chan []byte
sendQueueSize uint32 sendQueueSize uint32 // atomic.
recving []byte recving []byte
sending []byte sending []byte
priority uint priority uint
@ -456,6 +469,7 @@ type Channel struct {
} }
func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel { func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
desc.FillDefaults()
if desc.Priority <= 0 { if desc.Priority <= 0 {
panic("Channel default priority must be a postive integer") panic("Channel default priority must be a postive integer")
} }
@ -463,8 +477,8 @@ func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
conn: conn, conn: conn,
desc: desc, desc: desc,
id: desc.Id, id: desc.Id,
sendQueue: make(chan []byte, defaultSendQueueCapacity), sendQueue: make(chan []byte, desc.SendQueueCapacity),
recving: make([]byte, 0, defaultRecvBufferCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity),
priority: desc.Priority, priority: desc.Priority,
} }
} }

View File

@ -13,8 +13,7 @@ import (
type Peer struct { type Peer struct {
outbound bool outbound bool
mconn *MConnection mconn *MConnection
started uint32 running uint32
stopped uint32
Key string Key string
Data *CMap // User data. Data *CMap // User data.
@ -37,7 +36,7 @@ func newPeer(conn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDesc
p = &Peer{ p = &Peer{
outbound: outbound, outbound: outbound,
mconn: mconn, mconn: mconn,
stopped: 0, running: 0,
Key: mconn.RemoteAddress.String(), Key: mconn.RemoteAddress.String(),
Data: NewCMap(), Data: NewCMap(),
} }
@ -45,21 +44,21 @@ func newPeer(conn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDesc
} }
func (p *Peer) start() { func (p *Peer) start() {
if atomic.CompareAndSwapUint32(&p.started, 0, 1) { if atomic.CompareAndSwapUint32(&p.running, 0, 1) {
log.Debug("Starting Peer", "peer", p) log.Debug("Starting Peer", "peer", p)
p.mconn.Start() p.mconn.Start()
} }
} }
func (p *Peer) stop() { func (p *Peer) stop() {
if atomic.CompareAndSwapUint32(&p.stopped, 0, 1) { if atomic.CompareAndSwapUint32(&p.running, 1, 0) {
log.Debug("Stopping Peer", "peer", p) log.Debug("Stopping Peer", "peer", p)
p.mconn.Stop() p.mconn.Stop()
} }
} }
func (p *Peer) IsStopped() bool { func (p *Peer) IsRunning() bool {
return atomic.LoadUint32(&p.stopped) == 1 return atomic.LoadUint32(&p.running) == 1
} }
func (p *Peer) Connection() *MConnection { func (p *Peer) Connection() *MConnection {
@ -71,21 +70,21 @@ func (p *Peer) IsOutbound() bool {
} }
func (p *Peer) Send(chId byte, msg interface{}) bool { func (p *Peer) Send(chId byte, msg interface{}) bool {
if atomic.LoadUint32(&p.stopped) == 1 { if atomic.LoadUint32(&p.running) == 0 {
return false return false
} }
return p.mconn.Send(chId, msg) return p.mconn.Send(chId, msg)
} }
func (p *Peer) TrySend(chId byte, msg interface{}) bool { func (p *Peer) TrySend(chId byte, msg interface{}) bool {
if atomic.LoadUint32(&p.stopped) == 1 { if atomic.LoadUint32(&p.running) == 0 {
return false return false
} }
return p.mconn.TrySend(chId, msg) return p.mconn.TrySend(chId, msg)
} }
func (p *Peer) CanSend(chId byte) bool { func (p *Peer) CanSend(chId byte) bool {
if atomic.LoadUint32(&p.stopped) == 1 { if atomic.LoadUint32(&p.running) == 0 {
return false return false
} }
return p.mconn.CanSend(chId) return p.mconn.CanSend(chId)

View File

@ -7,6 +7,7 @@ import (
// IPeerSet has a (immutable) subset of the methods of PeerSet. // IPeerSet has a (immutable) subset of the methods of PeerSet.
type IPeerSet interface { type IPeerSet interface {
Has(key string) bool Has(key string) bool
Get(key string) *Peer
List() []*Peer List() []*Peer
Size() int Size() int
} }
@ -55,6 +56,17 @@ func (ps *PeerSet) Has(peerKey string) bool {
return ok return ok
} }
func (ps *PeerSet) Get(peerKey string) *Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item, ok := ps.lookup[peerKey]
if ok {
return item.peer
} else {
return nil
}
}
func (ps *PeerSet) Remove(peer *Peer) { func (ps *PeerSet) Remove(peer *Peer) {
ps.mtx.Lock() ps.mtx.Lock()
defer ps.mtx.Unlock() defer ps.mtx.Unlock()

View File

@ -14,7 +14,7 @@ import (
var pexErrInvalidMessage = errors.New("Invalid PEX message") var pexErrInvalidMessage = errors.New("Invalid PEX message")
const ( const (
PexCh = byte(0x00) PexChannel = byte(0x00)
ensurePeersPeriodSeconds = 30 ensurePeersPeriodSeconds = 30
minNumOutboundPeers = 10 minNumOutboundPeers = 10
maxNumPeers = 50 maxNumPeers = 50
@ -62,8 +62,9 @@ func (pexR *PEXReactor) Stop() {
func (pexR *PEXReactor) GetChannels() []*ChannelDescriptor { func (pexR *PEXReactor) GetChannels() []*ChannelDescriptor {
return []*ChannelDescriptor{ return []*ChannelDescriptor{
&ChannelDescriptor{ &ChannelDescriptor{
Id: PexCh, Id: PexChannel,
Priority: 1, Priority: 1,
SendQueueCapacity: 10,
}, },
} }
} }
@ -97,9 +98,9 @@ func (pexR *PEXReactor) Receive(chId byte, src *Peer, msgBytes []byte) {
switch msg.(type) { switch msg.(type) {
case *pexHandshakeMessage: case *pexHandshakeMessage:
chainId := msg.(*pexHandshakeMessage).ChainId network := msg.(*pexHandshakeMessage).Network
if chainId != pexR.sw.chainId { if network != pexR.sw.network {
err := fmt.Sprintf("Peer is on a different chain/network. Got %s, expected %s", chainId, pexR.sw.chainId) err := fmt.Sprintf("Peer is on a different chain/network. Got %s, expected %s", network, pexR.sw.network)
pexR.sw.StopPeerForError(src, err) pexR.sw.StopPeerForError(src, err)
} }
case *pexRequestMessage: case *pexRequestMessage:
@ -122,11 +123,11 @@ func (pexR *PEXReactor) Receive(chId byte, src *Peer, msgBytes []byte) {
// Asks peer for more addresses. // Asks peer for more addresses.
func (pexR *PEXReactor) RequestPEX(peer *Peer) { func (pexR *PEXReactor) RequestPEX(peer *Peer) {
peer.Send(PexCh, &pexRequestMessage{}) peer.Send(PexChannel, &pexRequestMessage{})
} }
func (pexR *PEXReactor) SendAddrs(peer *Peer, addrs []*NetAddress) { func (pexR *PEXReactor) SendAddrs(peer *Peer, addrs []*NetAddress) {
peer.Send(PexCh, &pexAddrsMessage{Addrs: addrs}) peer.Send(PexChannel, &pexAddrsMessage{Addrs: addrs})
} }
// Ensures that sufficient peers are connected. (continuous) // Ensures that sufficient peers are connected. (continuous)
@ -175,10 +176,12 @@ func (pexR *PEXReactor) ensurePeers() {
alreadyDialing := pexR.sw.IsDialing(try) alreadyDialing := pexR.sw.IsDialing(try)
alreadyConnected := pexR.sw.Peers().Has(try.String()) alreadyConnected := pexR.sw.Peers().Has(try.String())
if alreadySelected || alreadyDialing || alreadyConnected { if alreadySelected || alreadyDialing || alreadyConnected {
log.Debug("Cannot dial address", "addr", try, /*
"alreadySelected", alreadySelected, log.Debug("Cannot dial address", "addr", try,
"alreadyDialing", alreadyDialing, "alreadySelected", alreadySelected,
"alreadyConnected", alreadyConnected) "alreadyDialing", alreadyDialing,
"alreadyConnected", alreadyConnected)
*/
continue continue
} else { } else {
log.Debug("Will dial address", "addr", try) log.Debug("Will dial address", "addr", try)
@ -237,7 +240,7 @@ func DecodeMessage(bz []byte) (msg interface{}, err error) {
A pexHandshakeMessage contains the peer's chainId A pexHandshakeMessage contains the peer's chainId
*/ */
type pexHandshakeMessage struct { type pexHandshakeMessage struct {
ChainId string Network string
} }
func (m *pexHandshakeMessage) TypeByte() byte { return msgTypeHandshake } func (m *pexHandshakeMessage) TypeByte() byte { return msgTypeHandshake }

View File

@ -1,11 +1,9 @@
package p2p package p2p
import ( import (
"encoding/hex"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"sync/atomic"
"time" "time"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
@ -29,20 +27,16 @@ or more `Channels`. So while sending outgoing messages is typically performed o
incoming messages are received on the reactor. incoming messages are received on the reactor.
*/ */
type Switch struct { type Switch struct {
reactors []Reactor network string
reactors map[string]Reactor
chDescs []*ChannelDescriptor chDescs []*ChannelDescriptor
reactorsByCh map[byte]Reactor reactorsByCh map[byte]Reactor
peers *PeerSet peers *PeerSet
dialing *CMap dialing *CMap
listeners *CMap // listenerName -> chan interface{} listeners *CMap // listenerName -> chan interface{}
quit chan struct{}
started uint32
stopped uint32
chainId string
} }
var ( var (
ErrSwitchStopped = errors.New("Switch already stopped")
ErrSwitchDuplicatePeer = errors.New("Duplicate peer") ErrSwitchDuplicatePeer = errors.New("Duplicate peer")
) )
@ -50,71 +44,83 @@ const (
peerDialTimeoutSeconds = 3 peerDialTimeoutSeconds = 3
) )
func NewSwitch(reactors []Reactor) *Switch { func NewSwitch() *Switch {
// Validate the reactors. no two reactors can share the same channel.
chDescs := []*ChannelDescriptor{}
reactorsByCh := make(map[byte]Reactor)
for _, reactor := range reactors {
reactorChannels := reactor.GetChannels()
for _, chDesc := range reactorChannels {
chId := chDesc.Id
if reactorsByCh[chId] != nil {
panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chId, reactorsByCh[chId], reactor))
}
chDescs = append(chDescs, chDesc)
reactorsByCh[chId] = reactor
}
}
sw := &Switch{ sw := &Switch{
reactors: reactors, network: "",
chDescs: chDescs, reactors: make(map[string]Reactor),
reactorsByCh: reactorsByCh, chDescs: make([]*ChannelDescriptor, 0),
reactorsByCh: make(map[byte]Reactor),
peers: NewPeerSet(), peers: NewPeerSet(),
dialing: NewCMap(), dialing: NewCMap(),
listeners: NewCMap(), listeners: NewCMap(),
quit: make(chan struct{}),
stopped: 0,
} }
return sw return sw
} }
func (sw *Switch) Start() { // Not goroutine safe.
if atomic.CompareAndSwapUint32(&sw.started, 0, 1) { func (sw *Switch) SetNetwork(network string) {
log.Info("Starting Switch") sw.network = network
for _, reactor := range sw.reactors { }
reactor.Start(sw)
// Not goroutine safe.
func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
// Validate the reactor.
// No two reactors can share the same channel.
reactorChannels := reactor.GetChannels()
for _, chDesc := range reactorChannels {
chId := chDesc.Id
if sw.reactorsByCh[chId] != nil {
panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chId, sw.reactorsByCh[chId], reactor))
} }
sw.chDescs = append(sw.chDescs, chDesc)
sw.reactorsByCh[chId] = reactor
}
sw.reactors[name] = reactor
return reactor
}
func (sw *Switch) Reactor(name string) Reactor {
return sw.reactors[name]
}
// Convenience function
func (sw *Switch) StartReactors() {
for _, reactor := range sw.reactors {
reactor.Start(sw)
} }
} }
// Convenience function
func (sw *Switch) StopReactors() {
// Stop all reactors.
for _, reactor := range sw.reactors {
reactor.Stop()
}
}
// Convenience function
func (sw *Switch) StopPeers() {
// Stop each peer.
for _, peer := range sw.peers.List() {
peer.stop()
}
sw.peers = NewPeerSet()
}
// Convenience function
func (sw *Switch) Stop() { func (sw *Switch) Stop() {
if atomic.CompareAndSwapUint32(&sw.stopped, 0, 1) { sw.StopPeers()
log.Info("Stopping Switch") sw.StopReactors()
close(sw.quit)
// Stop each peer.
for _, peer := range sw.peers.List() {
peer.stop()
}
sw.peers = NewPeerSet()
// Stop all reactors.
for _, reactor := range sw.reactors {
reactor.Stop()
}
}
} }
func (sw *Switch) Reactors() []Reactor { // Not goroutine safe to modify.
func (sw *Switch) Reactors() map[string]Reactor {
return sw.reactors return sw.reactors
} }
func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, error) { func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, error) {
if atomic.LoadUint32(&sw.stopped) == 1 {
return nil, ErrSwitchStopped
}
peer := newPeer(conn, outbound, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError) peer := newPeer(conn, outbound, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
// Add the peer to .peers // Add the peer to .peers
@ -126,23 +132,19 @@ func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, er
} }
// Start the peer // Start the peer
go peer.start() peer.start()
// Notify listeners. // Notify listeners.
sw.doAddPeer(peer) sw.doAddPeer(peer)
// Send handshake // Send handshake
msg := &pexHandshakeMessage{ChainId: sw.chainId} msg := &pexHandshakeMessage{Network: sw.network}
peer.Send(PexCh, msg) peer.Send(PexChannel, msg)
return peer, nil return peer, nil
} }
func (sw *Switch) DialPeerWithAddress(addr *NetAddress) (*Peer, error) { func (sw *Switch) DialPeerWithAddress(addr *NetAddress) (*Peer, error) {
if atomic.LoadUint32(&sw.stopped) == 1 {
return nil, ErrSwitchStopped
}
log.Debug("Dialing address", "address", addr) log.Debug("Dialing address", "address", addr)
sw.dialing.Set(addr.String(), addr) sw.dialing.Set(addr.String(), addr)
conn, err := addr.DialTimeout(peerDialTimeoutSeconds * time.Second) conn, err := addr.DialTimeout(peerDialTimeoutSeconds * time.Second)
@ -164,13 +166,10 @@ func (sw *Switch) IsDialing(addr *NetAddress) bool {
return sw.dialing.Has(addr.String()) return sw.dialing.Has(addr.String())
} }
// Broadcast runs a go routine for each attemptted send, which will block // Broadcast runs a go routine for each attempted send, which will block
// trying to send for defaultSendTimeoutSeconds. Returns a channel // trying to send for defaultSendTimeoutSeconds. Returns a channel
// which receives success values for each attempted send (false if times out) // which receives success values for each attempted send (false if times out)
func (sw *Switch) Broadcast(chId byte, msg interface{}) chan bool { func (sw *Switch) Broadcast(chId byte, msg interface{}) chan bool {
if atomic.LoadUint32(&sw.stopped) == 1 {
return nil
}
successChan := make(chan bool, len(sw.peers.List())) successChan := make(chan bool, len(sw.peers.List()))
log.Debug("Broadcast", "channel", chId, "msg", msg) log.Debug("Broadcast", "channel", chId, "msg", msg)
for _, peer := range sw.peers.List() { for _, peer := range sw.peers.List() {
@ -223,14 +222,6 @@ func (sw *Switch) StopPeerGracefully(peer *Peer) {
sw.doRemovePeer(peer, nil) sw.doRemovePeer(peer, nil)
} }
func (sw *Switch) GetChainId() string {
return sw.chainId
}
func (sw *Switch) SetChainId(hash []byte, network string) {
sw.chainId = hex.EncodeToString(hash) + "-" + network
}
func (sw *Switch) IsListening() bool { func (sw *Switch) IsListening() bool {
return sw.listeners.Size() > 0 return sw.listeners.Size() > 0
} }
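A rough sketch of the reworked Switch wiring and of Broadcast as documented above (the reactor and message values are illustrative; see node.go earlier in this diff for the real wiring):
sw := p2p.NewSwitch()
sw.SetNetwork("my-testnet") // replaces the old SetChainId

// Reactors are now registered by name and started individually.
// myReactor is a hypothetical Reactor implementation.
sw.AddReactor("MYREACTOR", myReactor).Start(sw)

// Broadcast still returns a channel that receives one bool per peer,
// false for any send that timed out.
successChan := sw.Broadcast(byte(0x20), msg)
_ = successChan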

View File

@ -68,12 +68,12 @@ func (tr *TestReactor) Receive(chId byte, peer *Peer, msgBytes []byte) {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// convenience method for creating two switches connected to each other. // convenience method for creating two switches connected to each other.
func makeSwitchPair(t testing.TB, reactorsGenerator func() []Reactor) (*Switch, *Switch) { func makeSwitchPair(t testing.TB, initSwitch func(*Switch) *Switch) (*Switch, *Switch) {
// Create two switches that will be interconnected. // Create two switches that will be interconnected.
s1 := NewSwitch(reactorsGenerator()) s1 := initSwitch(NewSwitch())
s2 := NewSwitch(reactorsGenerator()) s2 := initSwitch(NewSwitch())
// Create a listener for s1 // Create a listener for s1
l := NewDefaultListener("tcp", ":8001", true) l := NewDefaultListener("tcp", ":8001", true)
@ -104,18 +104,17 @@ func makeSwitchPair(t testing.TB, reactorsGenerator func() []Reactor) (*Switch,
} }
func TestSwitches(t *testing.T) { func TestSwitches(t *testing.T) {
s1, s2 := makeSwitchPair(t, func() []Reactor { s1, s2 := makeSwitchPair(t, func(sw *Switch) *Switch {
// Make two reactors of two channels each // Make two reactors of two channels each
reactors := make([]Reactor, 2) sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
reactors[0] = NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{Id: byte(0x00), Priority: 10}, &ChannelDescriptor{Id: byte(0x00), Priority: 10},
&ChannelDescriptor{Id: byte(0x01), Priority: 10}, &ChannelDescriptor{Id: byte(0x01), Priority: 10},
}, true) }, true)).Start(sw) // Start the reactor
reactors[1] = NewTestReactor([]*ChannelDescriptor{ sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{Id: byte(0x02), Priority: 10}, &ChannelDescriptor{Id: byte(0x02), Priority: 10},
&ChannelDescriptor{Id: byte(0x03), Priority: 10}, &ChannelDescriptor{Id: byte(0x03), Priority: 10},
}, true) }, true)).Start(sw) // Start the reactor
return reactors return sw
}) })
defer s1.Stop() defer s1.Stop()
defer s2.Stop() defer s2.Stop()
@ -129,8 +128,8 @@ func TestSwitches(t *testing.T) {
} }
ch0Msg := "channel zero" ch0Msg := "channel zero"
ch1Msg := "channel one" ch1Msg := "channel foo"
ch2Msg := "channel two" ch2Msg := "channel bar"
s1.Broadcast(byte(0x00), ch0Msg) s1.Broadcast(byte(0x00), ch0Msg)
s1.Broadcast(byte(0x01), ch1Msg) s1.Broadcast(byte(0x01), ch1Msg)
@ -140,7 +139,7 @@ func TestSwitches(t *testing.T) {
time.Sleep(5000 * time.Millisecond) time.Sleep(5000 * time.Millisecond)
// Check message on ch0 // Check message on ch0
ch0Msgs := s2.Reactors()[0].(*TestReactor).msgsReceived[byte(0x00)] ch0Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x00)]
if len(ch0Msgs) != 2 { if len(ch0Msgs) != 2 {
t.Errorf("Expected to have received 1 message in ch0") t.Errorf("Expected to have received 1 message in ch0")
} }
@ -149,7 +148,7 @@ func TestSwitches(t *testing.T) {
} }
// Check message on ch1 // Check message on ch1
ch1Msgs := s2.Reactors()[0].(*TestReactor).msgsReceived[byte(0x01)] ch1Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x01)]
if len(ch1Msgs) != 1 { if len(ch1Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch1") t.Errorf("Expected to have received 1 message in ch1")
} }
@ -158,7 +157,7 @@ func TestSwitches(t *testing.T) {
} }
// Check message on ch2 // Check message on ch2
ch2Msgs := s2.Reactors()[1].(*TestReactor).msgsReceived[byte(0x02)] ch2Msgs := s2.Reactor("bar").(*TestReactor).msgsReceived[byte(0x02)]
if len(ch2Msgs) != 1 { if len(ch2Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch2") t.Errorf("Expected to have received 1 message in ch2")
} }
@ -172,18 +171,17 @@ func BenchmarkSwitches(b *testing.B) {
b.StopTimer() b.StopTimer()
s1, s2 := makeSwitchPair(b, func() []Reactor { s1, s2 := makeSwitchPair(b, func(sw *Switch) *Switch {
// Make two reactors of two channels each // Make two reactors of two channels each
reactors := make([]Reactor, 2) sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
reactors[0] = NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{Id: byte(0x00), Priority: 10}, &ChannelDescriptor{Id: byte(0x00), Priority: 10},
&ChannelDescriptor{Id: byte(0x01), Priority: 10}, &ChannelDescriptor{Id: byte(0x01), Priority: 10},
}, false) }, false))
reactors[1] = NewTestReactor([]*ChannelDescriptor{ sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{Id: byte(0x02), Priority: 10}, &ChannelDescriptor{Id: byte(0x02), Priority: 10},
&ChannelDescriptor{Id: byte(0x03), Priority: 10}, &ChannelDescriptor{Id: byte(0x03), Priority: 10},
}, false) }, false))
return reactors return sw
}) })
defer s1.Stop() defer s1.Stop()
defer s2.Stop() defer s2.Stop()
@ -194,7 +192,7 @@ func BenchmarkSwitches(b *testing.B) {
numSuccess, numFailure := 0, 0 numSuccess, numFailure := 0, 0
// Send random message from one channel to another // Send random message from one channel to another
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
chId := byte(i % 4) chId := byte(i % 4)
successChan := s1.Broadcast(chId, "test data") successChan := s1.Broadcast(chId, "test data")

View File

@ -1,18 +1,18 @@
package core package core
import ( import (
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool" mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
) )
var blockStore *types.BlockStore var blockStore *bc.BlockStore
var consensusState *consensus.ConsensusState var consensusState *consensus.ConsensusState
var mempoolReactor *mempl.MempoolReactor var mempoolReactor *mempl.MempoolReactor
var p2pSwitch *p2p.Switch var p2pSwitch *p2p.Switch
func SetPipeBlockStore(bs *types.BlockStore) { func SetRPCBlockStore(bs *bc.BlockStore) {
blockStore = bs blockStore = bs
} }

221
state/block_cache.go Normal file
View File

@ -0,0 +1,221 @@
package state
import (
"bytes"
"sort"
ac "github.com/tendermint/tendermint/account"
"github.com/tendermint/tendermint/binary"
. "github.com/tendermint/tendermint/common"
dbm "github.com/tendermint/tendermint/db"
"github.com/tendermint/tendermint/merkle"
)
func makeStorage(db dbm.DB, root []byte) merkle.Tree {
storage := merkle.NewIAVLTree(
binary.BasicCodec,
binary.BasicCodec,
1024,
db,
)
storage.Load(root)
return storage
}
type BlockCache struct {
db dbm.DB
backend *State
accounts map[string]accountInfo
storages map[Tuple256]storageInfo
}
func NewBlockCache(backend *State) *BlockCache {
return &BlockCache{
db: backend.DB,
backend: backend,
accounts: make(map[string]accountInfo),
storages: make(map[Tuple256]storageInfo),
}
}
func (cache *BlockCache) State() *State {
return cache.backend
}
//-------------------------------------
// BlockCache.account
func (cache *BlockCache) GetAccount(addr []byte) *ac.Account {
acc, _, removed, _ := cache.accounts[string(addr)].unpack()
if removed {
return nil
} else if acc != nil {
return acc
} else {
acc = cache.backend.GetAccount(addr)
cache.accounts[string(addr)] = accountInfo{acc, nil, false, false}
return acc
}
}
func (cache *BlockCache) UpdateAccount(acc *ac.Account) {
addr := acc.Address
// SANITY CHECK
_, storage, removed, _ := cache.accounts[string(addr)].unpack()
if removed {
panic("UpdateAccount on a removed account")
}
// SANITY CHECK END
cache.accounts[string(addr)] = accountInfo{acc, storage, false, true}
}
func (cache *BlockCache) RemoveAccount(addr []byte) {
// SANITY CHECK
_, _, removed, _ := cache.accounts[string(addr)].unpack()
if removed {
panic("RemoveAccount on a removed account")
}
// SANITY CHECK END
cache.accounts[string(addr)] = accountInfo{nil, nil, true, false}
}
// BlockCache.account
//-------------------------------------
// BlockCache.storage
func (cache *BlockCache) GetStorage(addr Word256, key Word256) (value Word256) {
// Check cache
info, ok := cache.storages[Tuple256{addr, key}]
if ok {
return info.value
}
// Get or load storage
acc, storage, removed, dirty := cache.accounts[string(addr.Prefix(20))].unpack()
if removed {
panic("GetStorage() on removed account")
}
if storage == nil {
storage = makeStorage(cache.db, acc.StorageRoot)
cache.accounts[string(addr.Prefix(20))] = accountInfo{acc, storage, false, dirty}
}
// Load and set cache
_, val_ := storage.Get(key.Bytes())
value = Zero256
if val_ != nil {
value = RightPadWord256(val_.([]byte))
}
cache.storages[Tuple256{addr, key}] = storageInfo{value, false}
return value
}
// NOTE: Set value to zero to remove it from the trie.
func (cache *BlockCache) SetStorage(addr Word256, key Word256, value Word256) {
_, _, removed, _ := cache.accounts[string(addr.Prefix(20))].unpack()
if removed {
panic("SetStorage() on a removed account")
}
cache.storages[Tuple256{addr, key}] = storageInfo{value, true}
}
// BlockCache.storage
//-------------------------------------
// CONTRACT the updates are in deterministic order.
func (cache *BlockCache) Sync() {
// Determine order for storage updates
// The address comes first so it'll be grouped.
storageKeys := make([]Tuple256, 0, len(cache.storages))
for keyTuple := range cache.storages {
storageKeys = append(storageKeys, keyTuple)
}
Tuple256Slice(storageKeys).Sort()
// Update storage for all account/key.
// Later we'll iterate over all the users and save storage + update storage root.
var (
curAddr Word256
curAcc *ac.Account
curAccRemoved bool
curStorage merkle.Tree
)
for _, storageKey := range storageKeys {
addr, key := Tuple256Split(storageKey)
if addr != curAddr || curAcc == nil {
acc, storage, removed, _ := cache.accounts[string(addr.Prefix(20))].unpack()
curAddr = addr
curAcc = acc
curAccRemoved = removed
curStorage = storage
}
if curAccRemoved {
continue
}
value, dirty := cache.storages[storageKey].unpack()
if !dirty {
continue
}
if value.IsZero() {
curStorage.Remove(key.Bytes())
} else {
curStorage.Set(key.Bytes(), value.Bytes())
}
}
// Determine order for accounts
addrStrs := []string{}
for addrStr := range cache.accounts {
addrStrs = append(addrStrs, addrStr)
}
sort.Strings(addrStrs)
// Update or delete accounts.
for _, addrStr := range addrStrs {
acc, storage, removed, dirty := cache.accounts[addrStr].unpack()
if removed {
removed := cache.backend.RemoveAccount(acc.Address)
if !removed {
panic(Fmt("Could not remove account to be removed: %X", acc.Address))
}
} else {
if acc == nil {
panic(Fmt("Account should not be nil for addr: %X", acc.Address))
}
if storage != nil {
newStorageRoot := storage.Save()
if !bytes.Equal(newStorageRoot, acc.StorageRoot) {
acc.StorageRoot = newStorageRoot
dirty = true
}
}
if dirty {
cache.backend.UpdateAccount(acc)
}
}
}
}
//-----------------------------------------------------------------------------
type accountInfo struct {
account *ac.Account
storage merkle.Tree
removed bool
dirty bool
}
func (accInfo accountInfo) unpack() (*ac.Account, merkle.Tree, bool, bool) {
return accInfo.account, accInfo.storage, accInfo.removed, accInfo.dirty
}
type storageInfo struct {
value Word256
dirty bool
}
func (stjInfo storageInfo) unpack() (Word256, bool) {
return stjInfo.value, stjInfo.dirty
}
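A rough sketch of how this new BlockCache is meant to be used, mirroring the mempool and block-execution changes elsewhere in this commit (the state and tx variables are illustrative):
cache := sm.NewBlockCache(state) // wrap the backing *State

// Reads and writes go through the cache; the backing State stays untouched.
if err := sm.ExecTx(cache, tx, false); err != nil {
    return err // on failure, simply drop the cache
}

// Flush dirty accounts and storage back to the backing State.
cache.Sync()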

18
state/common.go Normal file
View File

@ -0,0 +1,18 @@
package state
import (
ac "github.com/tendermint/tendermint/account"
. "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/vm"
)
type AccountGetter interface {
GetAccount(addr []byte) *ac.Account
}
type VMAccountState interface {
GetAccount(addr Word256) *vm.Account
UpdateAccount(acc *vm.Account)
RemoveAccount(acc *vm.Account)
CreateAccount(creator *vm.Account) *vm.Account
}

593
state/execution.go Normal file
View File

@ -0,0 +1,593 @@
package state
import (
"bytes"
"errors"
"github.com/tendermint/tendermint/account"
. "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/vm"
)
// NOTE: If an error occurs during block execution, state will be left
// in an invalid state. Copy the state before calling ExecBlock!
func ExecBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeader) error {
err := execBlock(s, block, blockPartsHeader)
if err != nil {
return err
}
// State.Hash should match block.StateHash
stateHash := s.Hash()
if !bytes.Equal(stateHash, block.StateHash) {
return Errorf("Invalid state hash. Expected %X, got %X",
stateHash, block.StateHash)
}
return nil
}
// executes transactions of a block, does not check block.StateHash
// NOTE: If an error occurs during block execution, state will be left
// in an invalid state. Copy the state before calling execBlock!
func execBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeader) error {
// Basic block validation.
err := block.ValidateBasic(s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime)
if err != nil {
return err
}
// Validate block Validation.
if block.Height == 1 {
if len(block.Validation.Commits) != 0 {
return errors.New("Block at height 1 (first block) should have no Validation commits")
}
} else {
if uint(len(block.Validation.Commits)) != s.LastBondedValidators.Size() {
return errors.New(Fmt("Invalid block validation size. Expected %v, got %v",
s.LastBondedValidators.Size(), len(block.Validation.Commits)))
}
var sumVotingPower uint64
s.LastBondedValidators.Iterate(func(index uint, val *Validator) bool {
commit := block.Validation.Commits[index]
if commit.IsZero() {
return false
} else {
vote := &types.Vote{
Height: block.Height - 1,
Round: commit.Round,
Type: types.VoteTypeCommit,
BlockHash: block.LastBlockHash,
BlockParts: block.LastBlockParts,
}
if val.PubKey.VerifyBytes(account.SignBytes(vote), commit.Signature) {
sumVotingPower += val.VotingPower
return false
} else {
log.Warn(Fmt("Invalid validation signature.\nval: %v\nvote: %v", val, vote))
err = errors.New("Invalid validation signature")
return true
}
}
})
if err != nil {
return err
}
if sumVotingPower <= s.LastBondedValidators.TotalVotingPower()*2/3 {
return errors.New("Insufficient validation voting power")
}
}
// Update Validator.LastCommitHeight as necessary.
for i, commit := range block.Validation.Commits {
if commit.IsZero() {
continue
}
_, val := s.LastBondedValidators.GetByIndex(uint(i))
if val == nil {
panic(Fmt("Failed to fetch validator at index %v", i))
}
if _, val_ := s.BondedValidators.GetByAddress(val.Address); val_ != nil {
val_.LastCommitHeight = block.Height - 1
updated := s.BondedValidators.Update(val_)
if !updated {
panic("Failed to update bonded validator LastCommitHeight")
}
} else if _, val_ := s.UnbondingValidators.GetByAddress(val.Address); val_ != nil {
val_.LastCommitHeight = block.Height - 1
updated := s.UnbondingValidators.Update(val_)
if !updated {
panic("Failed to update unbonding validator LastCommitHeight")
}
} else {
panic("Could not find validator")
}
}
// Remember LastBondedValidators
s.LastBondedValidators = s.BondedValidators.Copy()
// Create BlockCache to cache changes to state.
blockCache := NewBlockCache(s)
// Commit each tx
for _, tx := range block.Data.Txs {
err := ExecTx(blockCache, tx, true)
if err != nil {
return InvalidTxError{tx, err}
}
}
// Now sync the BlockCache to the backend.
blockCache.Sync()
// If any unbonding periods are over,
// reward account with bonded coins.
toRelease := []*Validator{}
s.UnbondingValidators.Iterate(func(index uint, val *Validator) bool {
if val.UnbondHeight+unbondingPeriodBlocks < block.Height {
toRelease = append(toRelease, val)
}
return false
})
for _, val := range toRelease {
s.releaseValidator(val)
}
// If any validators haven't signed in a while,
// unbond them, they have timed out.
toTimeout := []*Validator{}
s.BondedValidators.Iterate(func(index uint, val *Validator) bool {
lastActivityHeight := MaxUint(val.BondHeight, val.LastCommitHeight)
if lastActivityHeight+validatorTimeoutBlocks < block.Height {
log.Info("Validator timeout", "validator", val, "height", block.Height)
toTimeout = append(toTimeout, val)
}
return false
})
for _, val := range toTimeout {
s.unbondValidator(val)
}
// Increment validator AccumPowers
s.BondedValidators.IncrementAccum(1)
s.LastBlockHeight = block.Height
s.LastBlockHash = block.Hash()
s.LastBlockParts = blockPartsHeader
s.LastBlockTime = block.Time
return nil
}
// The accounts from the TxInputs must either already have
// account.PubKey.(type) != PubKeyNil, (it must be known),
// or it must be specified in the TxInput. If redeclared,
// the TxInput is modified and input.PubKey set to PubKeyNil.
func getOrMakeAccounts(state AccountGetter, ins []*types.TxInput, outs []*types.TxOutput) (map[string]*account.Account, error) {
accounts := map[string]*account.Account{}
for _, in := range ins {
// Account shouldn't be duplicated
if _, ok := accounts[string(in.Address)]; ok {
return nil, types.ErrTxDuplicateAddress
}
acc := state.GetAccount(in.Address)
if acc == nil {
return nil, types.ErrTxInvalidAddress
}
// PubKey should be present in either "account" or "in"
if err := checkInputPubKey(acc, in); err != nil {
return nil, err
}
accounts[string(in.Address)] = acc
}
for _, out := range outs {
// Account shouldn't be duplicated
if _, ok := accounts[string(out.Address)]; ok {
return nil, types.ErrTxDuplicateAddress
}
acc := state.GetAccount(out.Address)
// output account may be nil (new)
if acc == nil {
acc = &account.Account{
Address: out.Address,
PubKey: account.PubKeyNil{},
Sequence: 0,
Balance: 0,
}
}
accounts[string(out.Address)] = acc
}
return accounts, nil
}
func checkInputPubKey(acc *account.Account, in *types.TxInput) error {
if _, isNil := acc.PubKey.(account.PubKeyNil); isNil {
if _, isNil := in.PubKey.(account.PubKeyNil); isNil {
return types.ErrTxUnknownPubKey
}
if !bytes.Equal(in.PubKey.Address(), acc.Address) {
return types.ErrTxInvalidPubKey
}
acc.PubKey = in.PubKey
} else {
in.PubKey = account.PubKeyNil{}
}
return nil
}
func validateInputs(accounts map[string]*account.Account, signBytes []byte, ins []*types.TxInput) (total uint64, err error) {
for _, in := range ins {
acc := accounts[string(in.Address)]
if acc == nil {
panic("validateInputs() expects account in accounts")
}
err = validateInput(acc, signBytes, in)
if err != nil {
return
}
// Good. Add amount to total
total += in.Amount
}
return total, nil
}
func validateInput(acc *account.Account, signBytes []byte, in *types.TxInput) (err error) {
// Check TxInput basic
if err := in.ValidateBasic(); err != nil {
return err
}
// Check signatures
if !acc.PubKey.VerifyBytes(signBytes, in.Signature) {
return types.ErrTxInvalidSignature
}
// Check sequences
if acc.Sequence+1 != in.Sequence {
return types.ErrTxInvalidSequence{
Got: uint64(in.Sequence),
Expected: uint64(acc.Sequence + 1),
}
}
// Check amount
if acc.Balance < in.Amount {
return types.ErrTxInsufficientFunds
}
return nil
}
func validateOutputs(outs []*types.TxOutput) (total uint64, err error) {
for _, out := range outs {
// Check TxOutput basic
if err := out.ValidateBasic(); err != nil {
return 0, err
}
// Good. Add amount to total
total += out.Amount
}
return total, nil
}
func adjustByInputs(accounts map[string]*account.Account, ins []*types.TxInput) {
for _, in := range ins {
acc := accounts[string(in.Address)]
if acc == nil {
panic("adjustByInputs() expects account in accounts")
}
if acc.Balance < in.Amount {
panic("adjustByInputs() expects sufficient funds")
}
acc.Balance -= in.Amount
acc.Sequence += 1
}
}
func adjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutput) {
for _, out := range outs {
acc := accounts[string(out.Address)]
if acc == nil {
panic("adjustByOutputs() expects account in accounts")
}
acc.Balance += out.Amount
}
}
// If the tx is invalid, an error will be returned.
// Unlike ExecBlock(), state will not be altered.
func ExecTx(blockCache *BlockCache, tx_ types.Tx, runCall bool) error {
// TODO: do something with fees
fees := uint64(0)
_s := blockCache.State() // hack to access validators.
// Exec tx
switch tx := tx_.(type) {
case *types.SendTx:
accounts, err := getOrMakeAccounts(blockCache, tx.Inputs, tx.Outputs)
if err != nil {
return err
}
signBytes := account.SignBytes(tx)
inTotal, err := validateInputs(accounts, signBytes, tx.Inputs)
if err != nil {
return err
}
outTotal, err := validateOutputs(tx.Outputs)
if err != nil {
return err
}
if outTotal > inTotal {
return types.ErrTxInsufficientFunds
}
fee := inTotal - outTotal
fees += fee
// Good! Adjust accounts
adjustByInputs(accounts, tx.Inputs)
adjustByOutputs(accounts, tx.Outputs)
for _, acc := range accounts {
blockCache.UpdateAccount(acc)
}
return nil
case *types.CallTx:
var inAcc, outAcc *account.Account
// Validate input
inAcc = blockCache.GetAccount(tx.Input.Address)
if inAcc == nil {
log.Debug(Fmt("Can't find in account %X", tx.Input.Address))
return types.ErrTxInvalidAddress
}
// pubKey should be present in either "inAcc" or "tx.Input"
if err := checkInputPubKey(inAcc, tx.Input); err != nil {
log.Debug(Fmt("Can't find pubkey for %X", tx.Input.Address))
return err
}
signBytes := account.SignBytes(tx)
err := validateInput(inAcc, signBytes, tx.Input)
if err != nil {
log.Debug(Fmt("validateInput failed on %X:", tx.Input.Address))
return err
}
if tx.Input.Amount < tx.Fee {
log.Debug(Fmt("Sender did not send enough to cover the fee %X", tx.Input.Address))
return types.ErrTxInsufficientFunds
}
createAccount := len(tx.Address) == 0
if !createAccount {
// Validate output
if len(tx.Address) != 20 {
log.Debug(Fmt("Destination address is not 20 bytes %X", tx.Address))
return types.ErrTxInvalidAddress
}
// this may be nil if we are still in mempool and contract was created in same block as this tx
// but that's fine, because the account will be created properly when the create tx runs in the block
// and then this won't return nil. otherwise, we take their fee
outAcc = blockCache.GetAccount(tx.Address)
}
log.Debug(Fmt("Out account: %v", outAcc))
// Good!
value := tx.Input.Amount - tx.Fee
inAcc.Sequence += 1
if runCall {
var (
gas uint64 = tx.GasLimit
err error = nil
caller *vm.Account = toVMAccount(inAcc)
callee *vm.Account = nil
code []byte = nil
txCache = NewTxCache(blockCache)
params = vm.Params{
BlockHeight: uint64(_s.LastBlockHeight),
BlockHash: RightPadWord256(_s.LastBlockHash),
BlockTime: _s.LastBlockTime.Unix(),
GasLimit: 10000000,
}
)
// Maybe create a new callee account if
// this transaction is creating a new contract.
if !createAccount {
if outAcc == nil {
// take fees (sorry pal)
inAcc.Balance -= tx.Fee
blockCache.UpdateAccount(inAcc)
log.Debug(Fmt("Cannot find destination address %X. Deducting fee from caller", tx.Address))
return types.ErrTxInvalidAddress
}
callee = toVMAccount(outAcc)
code = callee.Code
log.Debug(Fmt("Calling contract %X with code %X", callee.Address, callee.Code))
} else {
callee = txCache.CreateAccount(caller)
log.Debug(Fmt("Created new account %X", callee.Address))
code = tx.Data
}
log.Debug(Fmt("Code for this contract: %X", code))
txCache.UpdateAccount(caller) // because we adjusted by input above, and bumped nonce maybe.
txCache.UpdateAccount(callee) // because we adjusted by input above.
vmach := vm.NewVM(txCache, params, caller.Address)
// NOTE: Call() transfers the value from caller to callee iff call succeeds.
ret, err := vmach.Call(caller, callee, code, tx.Data, value, &gas)
if err != nil {
// Failure. Charge the gas fee. The 'value' was otherwise not transferred.
log.Debug(Fmt("Error on execution: %v", err))
inAcc.Balance -= tx.Fee
blockCache.UpdateAccount(inAcc)
// Throw away 'txCache' which holds incomplete updates (don't sync it).
} else {
log.Debug("Successful execution")
// Success
if createAccount {
callee.Code = ret
}
txCache.Sync()
}
// Create a receipt from the ret and whether errored.
log.Info("VM call complete", "caller", caller, "callee", callee, "return", ret, "err", err)
} else {
// The mempool does not call txs until
// the proposer determines the order of txs.
// So mempool will skip the actual .Call(),
// and only deduct from the caller's balance.
inAcc.Balance -= value
if createAccount {
inAcc.Sequence += 1
}
blockCache.UpdateAccount(inAcc)
}
return nil
case *types.BondTx:
valInfo := blockCache.State().GetValidatorInfo(tx.PubKey.Address())
if valInfo != nil {
// TODO: In the future, check that the validator wasn't destroyed,
// add funds, merge UnbondTo outputs, and unbond validator.
return errors.New("Adding coins to existing validators not yet supported")
}
accounts, err := getOrMakeAccounts(blockCache, tx.Inputs, nil)
if err != nil {
return err
}
signBytes := account.SignBytes(tx)
inTotal, err := validateInputs(accounts, signBytes, tx.Inputs)
if err != nil {
return err
}
if err := tx.PubKey.ValidateBasic(); err != nil {
return err
}
outTotal, err := validateOutputs(tx.UnbondTo)
if err != nil {
return err
}
if outTotal > inTotal {
return types.ErrTxInsufficientFunds
}
fee := inTotal - outTotal
fees += fee
// Good! Adjust accounts
adjustByInputs(accounts, tx.Inputs)
for _, acc := range accounts {
blockCache.UpdateAccount(acc)
}
// Add ValidatorInfo
_s.SetValidatorInfo(&ValidatorInfo{
Address: tx.PubKey.Address(),
PubKey: tx.PubKey,
UnbondTo: tx.UnbondTo,
FirstBondHeight: _s.LastBlockHeight + 1,
FirstBondAmount: outTotal,
})
// Add Validator
added := _s.BondedValidators.Add(&Validator{
Address: tx.PubKey.Address(),
PubKey: tx.PubKey,
BondHeight: _s.LastBlockHeight + 1,
VotingPower: outTotal,
Accum: 0,
})
if !added {
panic("Failed to add validator")
}
return nil
case *types.UnbondTx:
// The validator must be active
_, val := _s.BondedValidators.GetByAddress(tx.Address)
if val == nil {
return types.ErrTxInvalidAddress
}
// Verify the signature
signBytes := account.SignBytes(tx)
if !val.PubKey.VerifyBytes(signBytes, tx.Signature) {
return types.ErrTxInvalidSignature
}
// tx.Height must be greater than val.LastCommitHeight
if tx.Height <= val.LastCommitHeight {
return errors.New("Invalid unbond height")
}
// Good!
_s.unbondValidator(val)
return nil
case *types.RebondTx:
// The validator must be inactive
_, val := _s.UnbondingValidators.GetByAddress(tx.Address)
if val == nil {
return types.ErrTxInvalidAddress
}
// Verify the signature
signBytes := account.SignBytes(tx)
if !val.PubKey.VerifyBytes(signBytes, tx.Signature) {
return types.ErrTxInvalidSignature
}
// tx.Height must be equal to the next height
if tx.Height != _s.LastBlockHeight+1 {
return errors.New(Fmt("Invalid rebond height. Expected %v, got %v", _s.LastBlockHeight+1, tx.Height))
}
// Good!
_s.rebondValidator(val)
return nil
case *types.DupeoutTx:
// Verify the signatures
_, accused := _s.BondedValidators.GetByAddress(tx.Address)
if accused == nil {
_, accused = _s.UnbondingValidators.GetByAddress(tx.Address)
if accused == nil {
return types.ErrTxInvalidAddress
}
}
voteASignBytes := account.SignBytes(&tx.VoteA)
voteBSignBytes := account.SignBytes(&tx.VoteB)
if !accused.PubKey.VerifyBytes(voteASignBytes, tx.VoteA.Signature) ||
!accused.PubKey.VerifyBytes(voteBSignBytes, tx.VoteB.Signature) {
return types.ErrTxInvalidSignature
}
// Verify equivocation
// TODO: in the future, just require one vote from a previous height that
// doesn't exist on this chain.
if tx.VoteA.Height != tx.VoteB.Height {
return errors.New("DupeoutTx heights don't match")
}
if tx.VoteA.Type == types.VoteTypeCommit && tx.VoteA.Round < tx.VoteB.Round {
// Check special case (not an error, validator must be slashed!)
// Validators should not sign another vote after committing.
} else if tx.VoteB.Type == types.VoteTypeCommit && tx.VoteB.Round < tx.VoteA.Round {
// We need to check both orderings of the votes
} else {
if tx.VoteA.Round != tx.VoteB.Round {
return errors.New("DupeoutTx rounds don't match")
}
if tx.VoteA.Type != tx.VoteB.Type {
return errors.New("DupeoutTx types don't match")
}
if bytes.Equal(tx.VoteA.BlockHash, tx.VoteB.BlockHash) {
return errors.New("DupeoutTx blockhashes shouldn't match")
}
}
// Good! (Bad validator!)
_s.destroyValidator(accused)
return nil
default:
panic("Unknown Tx type")
}
}
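Given the NOTE at the top of this file that a failed ExecBlock leaves the state invalid, callers are expected to stage against a copy, roughly as follows (sketch only; compare stageBlock in consensus/state.go earlier in this diff, state/block/blockParts are illustrative):
stateCopy := state.Copy() // cheap snapshot, per the State.Copy() contract below
if err := sm.ExecBlock(stateCopy, block, blockParts.Header()); err != nil {
    return err // the original state is untouched
}
// On success, adopt stateCopy (e.g. save it and move on to the next height).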

View File

@ -2,17 +2,14 @@ package state
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"time" "time"
"github.com/tendermint/tendermint/account" "github.com/tendermint/tendermint/account"
"github.com/tendermint/tendermint/binary" "github.com/tendermint/tendermint/binary"
. "github.com/tendermint/tendermint/common"
dbm "github.com/tendermint/tendermint/db" dbm "github.com/tendermint/tendermint/db"
"github.com/tendermint/tendermint/merkle" "github.com/tendermint/tendermint/merkle"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/vm"
) )
var ( var (
@ -25,17 +22,6 @@ var (
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
type InvalidTxError struct {
Tx types.Tx
Reason error
}
func (txErr InvalidTxError) Error() string {
return fmt.Sprintf("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason)
}
//-----------------------------------------------------------------------------
// NOTE: not goroutine-safe. // NOTE: not goroutine-safe.
type State struct { type State struct {
DB dbm.DB DB dbm.DB
@ -78,7 +64,6 @@ func LoadState(db dbm.DB) *State {
return s return s
} }
// Save this state into the db.
func (s *State) Save() { func (s *State) Save() {
s.accounts.Save() s.accounts.Save()
s.validatorInfos.Save() s.validatorInfos.Save()
@ -98,6 +83,9 @@ func (s *State) Save() {
s.DB.Set(stateKey, buf.Bytes()) s.DB.Set(stateKey, buf.Bytes())
} }
// CONTRACT:
// Copy() is a cheap way to take a snapshot,
// as if State were copied by value.
func (s *State) Copy() *State { func (s *State) Copy() *State {
return &State{ return &State{
DB: s.DB, DB: s.DB,
@ -113,437 +101,81 @@ func (s *State) Copy() *State {
} }
} }
// The accounts from the TxInputs must either already have // Returns a hash that represents the state data, excluding Last*
// account.PubKey.(type) != PubKeyNil, (it must be known), func (s *State) Hash() []byte {
// or it must be specified in the TxInput. If redeclared, hashables := []merkle.Hashable{
// the TxInput is modified and input.PubKey set to PubKeyNil. s.BondedValidators,
func (s *State) GetOrMakeAccounts(ins []*types.TxInput, outs []*types.TxOutput) (map[string]*account.Account, error) { s.UnbondingValidators,
accounts := map[string]*account.Account{} s.accounts,
for _, in := range ins { s.validatorInfos,
// Account shouldn't be duplicated
if _, ok := accounts[string(in.Address)]; ok {
return nil, types.ErrTxDuplicateAddress
}
acc := s.GetAccount(in.Address)
if acc == nil {
return nil, types.ErrTxInvalidAddress
}
// PubKey should be present in either "account" or "in"
if err := checkInputPubKey(acc, in); err != nil {
return nil, err
}
accounts[string(in.Address)] = acc
} }
for _, out := range outs { return merkle.HashFromHashables(hashables)
// Account shouldn't be duplicated
if _, ok := accounts[string(out.Address)]; ok {
return nil, types.ErrTxDuplicateAddress
}
acc := s.GetAccount(out.Address)
// output account may be nil (new)
if acc == nil {
acc = &account.Account{
Address: out.Address,
PubKey: account.PubKeyNil{},
Sequence: 0,
Balance: 0,
}
}
accounts[string(out.Address)] = acc
}
return accounts, nil
} }
func checkInputPubKey(acc *account.Account, in *types.TxInput) error { // Mutates the block in place and updates it with new state hash.
if _, isNil := acc.PubKey.(account.PubKeyNil); isNil { func (s *State) SetBlockStateHash(block *types.Block) error {
if _, isNil := in.PubKey.(account.PubKeyNil); isNil { sCopy := s.Copy()
return types.ErrTxUnknownPubKey err := execBlock(sCopy, block, types.PartSetHeader{})
} if err != nil {
if !bytes.Equal(in.PubKey.Address(), acc.Address) {
return types.ErrTxInvalidPubKey
}
acc.PubKey = in.PubKey
} else {
in.PubKey = account.PubKeyNil{}
}
return nil
}
func (s *State) ValidateInputs(accounts map[string]*account.Account, signBytes []byte, ins []*types.TxInput) (total uint64, err error) {
for _, in := range ins {
acc := accounts[string(in.Address)]
if acc == nil {
panic("ValidateInputs() expects account in accounts")
}
err = s.ValidateInput(acc, signBytes, in)
if err != nil {
return
}
// Good. Add amount to total
total += in.Amount
}
return total, nil
}
func (s *State) ValidateInput(acc *account.Account, signBytes []byte, in *types.TxInput) (err error) {
// Check TxInput basic
if err := in.ValidateBasic(); err != nil {
return err return err
} }
// Check signatures // Set block.StateHash
if !acc.PubKey.VerifyBytes(signBytes, in.Signature) { block.StateHash = sCopy.Hash()
return types.ErrTxInvalidSignature
}
// Check sequences
if acc.Sequence+1 != in.Sequence {
return types.ErrTxInvalidSequence{
Got: uint64(in.Sequence),
Expected: uint64(acc.Sequence + 1),
}
}
// Check amount
if acc.Balance < in.Amount {
return types.ErrTxInsufficientFunds
}
return nil return nil
} }
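A proposer would presumably fill in the state hash just before broadcasting, roughly as follows (sketch, not part of this commit; block assembly elided):

// Sketch: SetBlockStateHash executes the block against a copy of s,
// so s itself is not mutated; only block.StateHash is filled in.
func finalizeProposal(s *State, block *types.Block) error {
	if err := s.SetBlockStateHash(block); err != nil {
		return err // some tx failed to execute; do not propose this block
	}
	// With StateHash set, block.Hash() no longer returns nil.
	return nil
}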
func (s *State) ValidateOutputs(outs []*types.TxOutput) (total uint64, err error) { //-------------------------------------
for _, out := range outs { // State.accounts
// Check TxOutput basic
if err := out.ValidateBasic(); err != nil { // The returned Account is a copy, so mutating it
return 0, err // has no side effects.
} // Implements Statelike
// Good. Add amount to total func (s *State) GetAccount(address []byte) *account.Account {
total += out.Amount _, acc := s.accounts.Get(address)
if acc == nil {
return nil
} }
return total, nil return acc.(*account.Account).Copy()
} }
func (s *State) AdjustByInputs(accounts map[string]*account.Account, ins []*types.TxInput) { // The account is copied before setting, so mutating it
for _, in := range ins { // afterwards has no side effects.
acc := accounts[string(in.Address)] // Implements Statelike
if acc == nil { func (s *State) UpdateAccount(account *account.Account) bool {
panic("AdjustByInputs() expects account in accounts") return s.accounts.Set(account.Address, account.Copy())
}
if acc.Balance < in.Amount {
panic("AdjustByInputs() expects sufficient funds")
}
acc.Balance -= in.Amount
acc.Sequence += 1
}
} }
func (s *State) AdjustByOutputs(accounts map[string]*account.Account, outs []*types.TxOutput) { // Implements Statelike
for _, out := range outs { func (s *State) RemoveAccount(address []byte) bool {
acc := accounts[string(out.Address)] _, removed := s.accounts.Remove(address)
if acc == nil { return removed
panic("AdjustByOutputs() expects account in accounts")
}
acc.Balance += out.Amount
}
} }
// If the tx is invalid, an error will be returned. // The returned Account is a copy, so mutating it
// Unlike AppendBlock(), state will not be altered. // has no side effects.
func (s *State) ExecTx(tx_ types.Tx, runCall bool) error { func (s *State) GetAccounts() merkle.Tree {
return s.accounts.Copy()
}
// TODO: do something with fees // State.accounts
fees := uint64(0) //-------------------------------------
// State.validators
// Exec tx // The returned ValidatorInfo is a copy, so mutating it
switch tx := tx_.(type) { // has no side effects.
case *types.SendTx: func (s *State) GetValidatorInfo(address []byte) *ValidatorInfo {
accounts, err := s.GetOrMakeAccounts(tx.Inputs, tx.Outputs) _, valInfo := s.validatorInfos.Get(address)
if err != nil { if valInfo == nil {
return err
}
signBytes := account.SignBytes(tx)
inTotal, err := s.ValidateInputs(accounts, signBytes, tx.Inputs)
if err != nil {
return err
}
outTotal, err := s.ValidateOutputs(tx.Outputs)
if err != nil {
return err
}
if outTotal > inTotal {
return types.ErrTxInsufficientFunds
}
fee := inTotal - outTotal
fees += fee
// Good! Adjust accounts
s.AdjustByInputs(accounts, tx.Inputs)
s.AdjustByOutputs(accounts, tx.Outputs)
s.UpdateAccounts(accounts)
return nil return nil
case *types.CallTx:
var inAcc, outAcc *account.Account
// Validate input
inAcc = s.GetAccount(tx.Input.Address)
if inAcc == nil {
log.Debug(Fmt("Can't find in account %X", tx.Input.Address))
return types.ErrTxInvalidAddress
}
// pubKey should be present in either "inAcc" or "tx.Input"
if err := checkInputPubKey(inAcc, tx.Input); err != nil {
log.Debug(Fmt("Can't find pubkey for %X", tx.Input.Address))
return err
}
signBytes := account.SignBytes(tx)
err := s.ValidateInput(inAcc, signBytes, tx.Input)
if err != nil {
log.Debug(Fmt("ValidateInput failed on %X:", tx.Input.Address))
return err
}
if tx.Input.Amount < tx.Fee {
log.Debug(Fmt("Sender did not send enough to cover the fee %X", tx.Input.Address))
return types.ErrTxInsufficientFunds
}
createAccount := len(tx.Address) == 0
if !createAccount {
// Validate output
if len(tx.Address) != 20 {
log.Debug(Fmt("Destination address is not 20 bytes %X", tx.Address))
return types.ErrTxInvalidAddress
}
// this may be nil if we are still in mempool and contract was created in same block as this tx
// but that's fine, because the account will be created properly when the create tx runs in the block
// and then this won't return nil. otherwise, we take their fee
outAcc = s.GetAccount(tx.Address)
}
log.Debug(Fmt("Out account: %v", outAcc))
// Good!
value := tx.Input.Amount - tx.Fee
inAcc.Sequence += 1
if runCall {
var (
gas uint64 = tx.GasLimit
err error = nil
caller *vm.Account = toVMAccount(inAcc)
callee *vm.Account = nil
code []byte = nil
appState = NewVMAppState(s) // TODO: confusing.
params = vm.Params{
BlockHeight: uint64(s.LastBlockHeight),
BlockHash: vm.BytesToWord(s.LastBlockHash),
BlockTime: s.LastBlockTime.Unix(),
GasLimit: 10000000,
}
)
// Maybe create a new callee account if
// this transaction is creating a new contract.
if !createAccount {
if outAcc == nil {
// take fees (sorry pal)
inAcc.Balance -= tx.Fee
s.UpdateAccount(inAcc)
log.Debug(Fmt("Cannot find destination address %X. Deducting fee from caller", tx.Address))
return types.ErrTxInvalidAddress
}
callee = toVMAccount(outAcc)
code = callee.Code
log.Debug(Fmt("Calling contract %X with code %X", callee.Address.Address(), callee.Code))
} else {
callee, err = appState.CreateAccount(caller)
if err != nil {
log.Debug(Fmt("Error creating account"))
return err
}
log.Debug(Fmt("Created new account %X", callee.Address.Address()))
code = tx.Data
}
log.Debug(Fmt("Code for this contract: %X", code))
appState.UpdateAccount(caller) // because we adjusted by input above, and bumped nonce maybe.
appState.UpdateAccount(callee) // because we adjusted by input above.
vmach := vm.NewVM(appState, params, caller.Address)
// NOTE: Call() transfers the value from caller to callee iff call succeeds.
ret, err := vmach.Call(caller, callee, code, tx.Data, value, &gas)
if err != nil {
// Failure. Charge the gas fee. The 'value' was otherwise not transferred.
log.Debug(Fmt("Error on execution: %v", err))
inAcc.Balance -= tx.Fee
s.UpdateAccount(inAcc)
// Throw away 'appState' which holds incomplete updates (don't sync it).
} else {
log.Debug("Successful execution")
// Success
if createAccount {
callee.Code = ret
}
appState.Sync()
}
// Create a receipt from the ret and whether errored.
log.Info("VM call complete", "caller", caller, "callee", callee, "return", ret, "err", err)
} else {
// The mempool does not call txs until
// the proposer determines the order of txs.
// So mempool will skip the actual .Call(),
// and only deduct from the caller's balance.
inAcc.Balance -= value
if createAccount {
inAcc.Sequence += 1
}
s.UpdateAccount(inAcc)
}
return nil
case *types.BondTx:
valInfo := s.GetValidatorInfo(tx.PubKey.Address())
if valInfo != nil {
// TODO: In the future, check that the validator wasn't destroyed,
// add funds, merge UnbondTo outputs, and unbond validator.
return errors.New("Adding coins to existing validators not yet supported")
}
accounts, err := s.GetOrMakeAccounts(tx.Inputs, nil)
if err != nil {
return err
}
signBytes := account.SignBytes(tx)
inTotal, err := s.ValidateInputs(accounts, signBytes, tx.Inputs)
if err != nil {
return err
}
if err := tx.PubKey.ValidateBasic(); err != nil {
return err
}
outTotal, err := s.ValidateOutputs(tx.UnbondTo)
if err != nil {
return err
}
if outTotal > inTotal {
return types.ErrTxInsufficientFunds
}
fee := inTotal - outTotal
fees += fee
// Good! Adjust accounts
s.AdjustByInputs(accounts, tx.Inputs)
s.UpdateAccounts(accounts)
// Add ValidatorInfo
s.SetValidatorInfo(&ValidatorInfo{
Address: tx.PubKey.Address(),
PubKey: tx.PubKey,
UnbondTo: tx.UnbondTo,
FirstBondHeight: s.LastBlockHeight + 1,
FirstBondAmount: outTotal,
})
// Add Validator
added := s.BondedValidators.Add(&Validator{
Address: tx.PubKey.Address(),
PubKey: tx.PubKey,
BondHeight: s.LastBlockHeight + 1,
VotingPower: outTotal,
Accum: 0,
})
if !added {
panic("Failed to add validator")
}
return nil
case *types.UnbondTx:
// The validator must be active
_, val := s.BondedValidators.GetByAddress(tx.Address)
if val == nil {
return types.ErrTxInvalidAddress
}
// Verify the signature
signBytes := account.SignBytes(tx)
if !val.PubKey.VerifyBytes(signBytes, tx.Signature) {
return types.ErrTxInvalidSignature
}
// tx.Height must be greater than val.LastCommitHeight
if tx.Height <= val.LastCommitHeight {
return errors.New("Invalid unbond height")
}
// Good!
s.unbondValidator(val)
return nil
case *types.RebondTx:
// The validator must be inactive
_, val := s.UnbondingValidators.GetByAddress(tx.Address)
if val == nil {
return types.ErrTxInvalidAddress
}
// Verify the signature
signBytes := account.SignBytes(tx)
if !val.PubKey.VerifyBytes(signBytes, tx.Signature) {
return types.ErrTxInvalidSignature
}
// tx.Height must be equal to the next height
if tx.Height != s.LastBlockHeight+1 {
return errors.New(Fmt("Invalid rebond height. Expected %v, got %v", s.LastBlockHeight+1, tx.Height))
}
// Good!
s.rebondValidator(val)
return nil
case *types.DupeoutTx:
// Verify the signatures
_, accused := s.BondedValidators.GetByAddress(tx.Address)
if accused == nil {
_, accused = s.UnbondingValidators.GetByAddress(tx.Address)
if accused == nil {
return types.ErrTxInvalidAddress
}
}
voteASignBytes := account.SignBytes(&tx.VoteA)
voteBSignBytes := account.SignBytes(&tx.VoteB)
if !accused.PubKey.VerifyBytes(voteASignBytes, tx.VoteA.Signature) ||
!accused.PubKey.VerifyBytes(voteBSignBytes, tx.VoteB.Signature) {
return types.ErrTxInvalidSignature
}
// Verify equivocation
// TODO: in the future, just require one vote from a previous height that
// doesn't exist on this chain.
if tx.VoteA.Height != tx.VoteB.Height {
return errors.New("DupeoutTx heights don't match")
}
if tx.VoteA.Type == types.VoteTypeCommit && tx.VoteA.Round < tx.VoteB.Round {
// Check special case (not an error, validator must be slashed!)
// Validators should not sign another vote after committing.
} else if tx.VoteB.Type == types.VoteTypeCommit && tx.VoteB.Round < tx.VoteA.Round {
// We need to check both orderings of the votes
} else {
if tx.VoteA.Round != tx.VoteB.Round {
return errors.New("DupeoutTx rounds don't match")
}
if tx.VoteA.Type != tx.VoteB.Type {
return errors.New("DupeoutTx types don't match")
}
if bytes.Equal(tx.VoteA.BlockHash, tx.VoteB.BlockHash) {
return errors.New("DupeoutTx blockhashes shouldn't match")
}
}
// Good! (Bad validator!)
s.destroyValidator(accused)
return nil
default:
panic("Unknown Tx type")
} }
return valInfo.(*ValidatorInfo).Copy()
}
// Returns false if new, true if updated.
// The valInfo is copied before setting, so mutating it
// afterwards has no side effects.
func (s *State) SetValidatorInfo(valInfo *ValidatorInfo) (updated bool) {
return s.validatorInfos.Set(valInfo.Address, valInfo.Copy())
} }
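Because GetAccount, UpdateAccount, GetValidatorInfo, and SetValidatorInfo all work on copies, a read-modify-write has to be written back explicitly; mutating the returned copy alone changes nothing. A small illustration (hypothetical helper, using only the accessors above):

// Sketch: credit an account; the copy from GetAccount is purely local
// until UpdateAccount stores a fresh copy back into the merkle tree.
func credit(s *State, address []byte, amount uint64) bool {
	acc := s.GetAccount(address)
	if acc == nil {
		return false
	}
	acc.Balance += amount // mutates only the local copy
	return s.UpdateAccount(acc)
}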
func (s *State) unbondValidator(val *Validator) { func (s *State) unbondValidator(val *Validator) {
@ -582,12 +214,14 @@ func (s *State) releaseValidator(val *Validator) {
s.SetValidatorInfo(valInfo) s.SetValidatorInfo(valInfo)
// Send coins back to UnbondTo outputs // Send coins back to UnbondTo outputs
accounts, err := s.GetOrMakeAccounts(nil, valInfo.UnbondTo) accounts, err := getOrMakeAccounts(s, nil, valInfo.UnbondTo)
if err != nil { if err != nil {
panic("Couldn't get or make unbondTo accounts") panic("Couldn't get or make unbondTo accounts")
} }
s.AdjustByOutputs(accounts, valInfo.UnbondTo) adjustByOutputs(accounts, valInfo.UnbondTo)
s.UpdateAccounts(accounts) for _, acc := range accounts {
s.UpdateAccount(acc)
}
// Remove validator from UnbondingValidators // Remove validator from UnbondingValidators
_, removed := s.UnbondingValidators.Remove(val.Address) _, removed := s.UnbondingValidators.Remove(val.Address)
@ -617,219 +251,26 @@ func (s *State) destroyValidator(val *Validator) {
} }
// NOTE: If an error occurs during block execution, state will be left // State.validators
// at an invalid state. Copy the state before calling AppendBlock! //-------------------------------------
func (s *State) AppendBlock(block *types.Block, blockPartsHeader types.PartSetHeader) error { // State.storage
err := s.appendBlock(block, blockPartsHeader)
if err != nil { func (s *State) LoadStorage(hash []byte) (storage merkle.Tree) {
return err storage = merkle.NewIAVLTree(binary.BasicCodec, binary.BasicCodec, 1024, s.DB)
} storage.Load(hash)
// State.Hash should match block.StateHash return storage
stateHash := s.Hash()
if !bytes.Equal(stateHash, block.StateHash) {
return Errorf("Invalid state hash. Expected %X, got %X",
stateHash, block.StateHash)
}
return nil
} }
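LoadStorage exposes an account's contract storage as its own IAVL tree keyed by the account's StorageRoot. Reading one slot might look like this (sketch; key and value encodings follow binary.BasicCodec as above):

// Sketch: fetch a raw storage value for an account, or nil if unset.
func readStorage(s *State, acc *account.Account, key []byte) interface{} {
	storage := s.LoadStorage(acc.StorageRoot)
	_, value := storage.Get(key)
	return value
}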
func (s *State) SetBlockStateHash(block *types.Block) error { // State.storage
sCopy := s.Copy() //-------------------------------------
err := sCopy.appendBlock(block, types.PartSetHeader{})
if err != nil { //-----------------------------------------------------------------------------
return err
} type InvalidTxError struct {
// Set block.StateHash Tx types.Tx
block.StateHash = sCopy.Hash() Reason error
return nil
} }
// Appends the block, does not check block.StateHash func (txErr InvalidTxError) Error() string {
// NOTE: If an error occurs during block execution, state will be left return fmt.Sprintf("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason)
// at an invalid state. Copy the state before calling appendBlock!
func (s *State) appendBlock(block *types.Block, blockPartsHeader types.PartSetHeader) error {
// Basic block validation.
err := block.ValidateBasic(s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime)
if err != nil {
return err
}
// Validate block Validation.
if block.Height == 1 {
if len(block.Validation.Commits) != 0 {
return errors.New("Block at height 1 (first block) should have no Validation commits")
}
} else {
if uint(len(block.Validation.Commits)) != s.LastBondedValidators.Size() {
return errors.New(Fmt("Invalid block validation size. Expected %v, got %v",
s.LastBondedValidators.Size(), len(block.Validation.Commits)))
}
var sumVotingPower uint64
s.LastBondedValidators.Iterate(func(index uint, val *Validator) bool {
commit := block.Validation.Commits[index]
if commit.IsZero() {
return false
} else {
vote := &types.Vote{
Height: block.Height - 1,
Round: commit.Round,
Type: types.VoteTypeCommit,
BlockHash: block.LastBlockHash,
BlockParts: block.LastBlockParts,
}
if val.PubKey.VerifyBytes(account.SignBytes(vote), commit.Signature) {
sumVotingPower += val.VotingPower
return false
} else {
log.Warn(Fmt("Invalid validation signature.\nval: %v\nvote: %v", val, vote))
err = errors.New("Invalid validation signature")
return true
}
}
})
if err != nil {
return err
}
if sumVotingPower <= s.LastBondedValidators.TotalVotingPower()*2/3 {
return errors.New("Insufficient validation voting power")
}
}
// Update Validator.LastCommitHeight as necessary.
for i, commit := range block.Validation.Commits {
if commit.IsZero() {
continue
}
_, val := s.LastBondedValidators.GetByIndex(uint(i))
if val == nil {
panic(Fmt("Failed to fetch validator at index %v", i))
}
if _, val_ := s.BondedValidators.GetByAddress(val.Address); val_ != nil {
val_.LastCommitHeight = block.Height - 1
updated := s.BondedValidators.Update(val_)
if !updated {
panic("Failed to update bonded validator LastCommitHeight")
}
} else if _, val_ := s.UnbondingValidators.GetByAddress(val.Address); val_ != nil {
val_.LastCommitHeight = block.Height - 1
updated := s.UnbondingValidators.Update(val_)
if !updated {
panic("Failed to update unbonding validator LastCommitHeight")
}
} else {
panic("Could not find validator")
}
}
// Remember LastBondedValidators
s.LastBondedValidators = s.BondedValidators.Copy()
// Commit each tx
for _, tx := range block.Data.Txs {
err := s.ExecTx(tx, true)
if err != nil {
return InvalidTxError{tx, err}
}
}
// If any unbonding periods are over,
// reward account with bonded coins.
toRelease := []*Validator{}
s.UnbondingValidators.Iterate(func(index uint, val *Validator) bool {
if val.UnbondHeight+unbondingPeriodBlocks < block.Height {
toRelease = append(toRelease, val)
}
return false
})
for _, val := range toRelease {
s.releaseValidator(val)
}
// If any validators haven't signed in a while,
// unbond them, they have timed out.
toTimeout := []*Validator{}
s.BondedValidators.Iterate(func(index uint, val *Validator) bool {
lastActivityHeight := MaxUint(val.BondHeight, val.LastCommitHeight)
if lastActivityHeight+validatorTimeoutBlocks < block.Height {
log.Info("Validator timeout", "validator", val, "height", block.Height)
toTimeout = append(toTimeout, val)
}
return false
})
for _, val := range toTimeout {
s.unbondValidator(val)
}
// Increment validator AccumPowers
s.BondedValidators.IncrementAccum(1)
s.LastBlockHeight = block.Height
s.LastBlockHash = block.Hash()
s.LastBlockParts = blockPartsHeader
s.LastBlockTime = block.Time
return nil
}
// The returned Account is a copy, so mutating it
// has no side effects.
func (s *State) GetAccount(address []byte) *account.Account {
_, acc := s.accounts.Get(address)
if acc == nil {
return nil
}
return acc.(*account.Account).Copy()
}
// The returned Account is a copy, so mutating it
// has no side effects.
func (s *State) GetAccounts() merkle.Tree {
return s.accounts.Copy()
}
// The account is copied before setting, so mutating it
// afterwards has no side effects.
func (s *State) UpdateAccount(account *account.Account) {
s.accounts.Set(account.Address, account.Copy())
}
// The accounts are copied before setting, so mutating it
// afterwards has no side effects.
func (s *State) UpdateAccounts(accounts map[string]*account.Account) {
for _, acc := range accounts {
s.accounts.Set(acc.Address, acc.Copy())
}
}
func (s *State) RemoveAccount(address []byte) bool {
_, removed := s.accounts.Remove(address)
return removed
}
// The returned ValidatorInfo is a copy, so mutating it
// has no side effects.
func (s *State) GetValidatorInfo(address []byte) *ValidatorInfo {
_, valInfo := s.validatorInfos.Get(address)
if valInfo == nil {
return nil
}
return valInfo.(*ValidatorInfo).Copy()
}
// Returns false if new, true if updated.
// The valInfo is copied before setting, so mutating it
// afterwards has no side effects.
func (s *State) SetValidatorInfo(valInfo *ValidatorInfo) (updated bool) {
return s.validatorInfos.Set(valInfo.Address, valInfo.Copy())
}
// Returns a hash that represents the state data,
// excluding Last*
func (s *State) Hash() []byte {
hashables := []merkle.Hashable{
s.BondedValidators,
s.UnbondingValidators,
s.accounts,
s.validatorInfos,
}
return merkle.HashFromHashables(hashables)
} }

View File

@ -2,7 +2,6 @@ package state
import ( import (
"github.com/tendermint/tendermint/account" "github.com/tendermint/tendermint/account"
"github.com/tendermint/tendermint/binary"
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
@ -11,6 +10,17 @@ import (
"time" "time"
) )
func execTxWithState(state *State, tx types.Tx, runCall bool) error {
cache := NewBlockCache(state)
err := ExecTx(cache, tx, runCall)
if err != nil {
return err
} else {
cache.Sync()
return nil
}
}
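The helper above is the execution pattern this branch introduces: run a tx against a BlockCache and only Sync on success. Checking a mempool tx without committing anything would presumably look like this (sketch):

// Sketch: validate a tx without touching the persistent state; the
// BlockCache absorbs all writes and is simply dropped afterwards.
func checkTx(state *State, tx types.Tx) error {
	cache := NewBlockCache(state)
	// runCall=false mirrors the mempool, which skips the actual VM call.
	return ExecTx(cache, tx, false)
}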
func TestCopyState(t *testing.T) { func TestCopyState(t *testing.T) {
// Generate a random state // Generate a random state
s0, privAccounts, _ := RandGenesisState(10, true, 1000, 5, true, 1000) s0, privAccounts, _ := RandGenesisState(10, true, 1000, 5, true, 1000)
@ -91,10 +101,10 @@ func TestGenesisSaveLoad(t *testing.T) {
// Make complete block and blockParts // Make complete block and blockParts
block := makeBlock(t, s0, nil, nil) block := makeBlock(t, s0, nil, nil)
blockParts := types.NewPartSetFromData(binary.BinaryBytes(block)) blockParts := block.MakePartSet()
// Now append the block to s0. // Now append the block to s0.
err := s0.AppendBlock(block, blockParts.Header()) err := ExecBlock(s0, block, blockParts.Header())
if err != nil { if err != nil {
t.Error("Error appending initial block:", err) t.Error("Error appending initial block:", err)
} }
@ -183,7 +193,7 @@ func TestTxSequence(t *testing.T) {
tx := makeSendTx(sequence) tx := makeSendTx(sequence)
tx.Inputs[0].Signature = privAccounts[0].Sign(tx) tx.Inputs[0].Signature = privAccounts[0].Sign(tx)
stateCopy := state.Copy() stateCopy := state.Copy()
err := stateCopy.ExecTx(tx, true) err := execTxWithState(stateCopy, tx, true)
if i == 1 { if i == 1 {
// Sequence is good. // Sequence is good.
if err != nil { if err != nil {
@ -242,7 +252,7 @@ func TestTxs(t *testing.T) {
} }
tx.Inputs[0].Signature = privAccounts[0].Sign(tx) tx.Inputs[0].Signature = privAccounts[0].Sign(tx)
err := state.ExecTx(tx, true) err := execTxWithState(state, tx, true)
if err != nil { if err != nil {
t.Errorf("Got error in executing send transaction, %v", err) t.Errorf("Got error in executing send transaction, %v", err)
} }
@ -279,7 +289,7 @@ func TestTxs(t *testing.T) {
}, },
} }
tx.Inputs[0].Signature = privAccounts[0].Sign(tx) tx.Inputs[0].Signature = privAccounts[0].Sign(tx)
err := state.ExecTx(tx, true) err := execTxWithState(state, tx, true)
if err != nil { if err != nil {
t.Errorf("Got error in executing bond transaction, %v", err) t.Errorf("Got error in executing bond transaction, %v", err)
} }
@ -338,7 +348,7 @@ func TestAddValidator(t *testing.T) {
// Make complete block and blockParts // Make complete block and blockParts
block0 := makeBlock(t, s0, nil, []types.Tx{bondTx}) block0 := makeBlock(t, s0, nil, []types.Tx{bondTx})
block0Parts := types.NewPartSetFromData(binary.BinaryBytes(block0)) block0Parts := block0.MakePartSet()
// Sanity check // Sanity check
if s0.BondedValidators.Size() != 1 { if s0.BondedValidators.Size() != 1 {
@ -346,7 +356,7 @@ func TestAddValidator(t *testing.T) {
} }
// Now append the block to s0. // Now append the block to s0.
err := s0.AppendBlock(block0, block0Parts.Header()) err := ExecBlock(s0, block0, block0Parts.Header())
if err != nil { if err != nil {
t.Error("Error appending initial block:", err) t.Error("Error appending initial block:", err)
} }
@ -379,8 +389,8 @@ func TestAddValidator(t *testing.T) {
}, },
}, nil, }, nil,
) )
block1Parts := types.NewPartSetFromData(binary.BinaryBytes(block1)) block1Parts := block1.MakePartSet()
err = s0.AppendBlock(block1, block1Parts.Header()) err = ExecBlock(s0, block1, block1Parts.Header())
if err != nil { if err != nil {
t.Error("Error appending secondary block:", err) t.Error("Error appending secondary block:", err)
} }

state/tx_cache.go Normal file
View File

@ -0,0 +1,191 @@
package state
import (
ac "github.com/tendermint/tendermint/account"
. "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/vm"
"github.com/tendermint/tendermint/vm/sha3"
)
type TxCache struct {
backend *BlockCache
accounts map[Word256]vmAccountInfo
storages map[Tuple256]Word256
logs []*vm.Log
}
func NewTxCache(backend *BlockCache) *TxCache {
return &TxCache{
backend: backend,
accounts: make(map[Word256]vmAccountInfo),
storages: make(map[Tuple256]Word256),
logs: make([]*vm.Log, 0),
}
}
//-------------------------------------
// TxCache.account
func (cache *TxCache) GetAccount(addr Word256) *vm.Account {
acc, removed := vmUnpack(cache.accounts[addr])
if removed {
return nil
} else {
return acc
}
}
func (cache *TxCache) UpdateAccount(acc *vm.Account) {
addr := acc.Address
// SANITY CHECK
_, removed := vmUnpack(cache.accounts[addr])
if removed {
panic("UpdateAccount on a removed account")
}
// SANITY CHECK END
cache.accounts[addr] = vmAccountInfo{acc, false}
}
func (cache *TxCache) RemoveAccount(acc *vm.Account) {
addr := acc.Address
// SANITY CHECK
_, removed := vmUnpack(cache.accounts[addr])
if removed {
panic("RemoveAccount on a removed account")
}
// SANITY CHECK END
cache.accounts[addr] = vmAccountInfo{acc, true}
}
// Creates a 20 byte address and bumps the creator's nonce.
func (cache *TxCache) CreateAccount(creator *vm.Account) *vm.Account {
// Generate an address
nonce := creator.Nonce
creator.Nonce += 1
addr := RightPadWord256(NewContractAddress(creator.Address.Prefix(20), nonce))
// Create account from address.
account, removed := vmUnpack(cache.accounts[addr])
if removed || account == nil {
account = &vm.Account{
Address: addr,
Balance: 0,
Code: nil,
Nonce: 0,
StorageRoot: Zero256,
}
cache.accounts[addr] = vmAccountInfo{account, false}
return account
} else {
panic(Fmt("Could not create account, address already exists: %X", addr))
}
}
// TxCache.account
//-------------------------------------
// TxCache.storage
func (cache *TxCache) GetStorage(addr Word256, key Word256) Word256 {
// Check cache
value, ok := cache.storages[Tuple256{addr, key}]
if ok {
return value
}
// Load from backend
return cache.backend.GetStorage(addr, key)
}
// NOTE: Set value to zero to remove it from the trie.
func (cache *TxCache) SetStorage(addr Word256, key Word256, value Word256) {
_, removed := vmUnpack(cache.accounts[addr])
if removed {
panic("SetStorage() on a removed account")
}
cache.storages[Tuple256{addr, key}] = value
}
// TxCache.storage
//-------------------------------------
// These updates do not have to be in deterministic order,
// the backend is responsible for ordering updates.
func (cache *TxCache) Sync() {
// Remove or update storage
for addrKey, value := range cache.storages {
addr, key := Tuple256Split(addrKey)
cache.backend.SetStorage(addr, key, value)
}
// Remove or update accounts
for addr, accInfo := range cache.accounts {
acc, removed := vmUnpack(accInfo)
if removed {
cache.backend.RemoveAccount(addr.Prefix(20))
} else {
cache.backend.UpdateAccount(toStateAccount(acc))
}
}
// TODO support logs, add them to the cache somehow.
}
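The intended lifecycle appears to be: wrap the BlockCache in a TxCache for a single VM run, Sync on success, and simply drop the TxCache on failure. A sketch of that flow (hypothetical wrapper, VM wiring elided):

// Sketch: run one VM call against a scratch TxCache.
func withTxCache(backend *BlockCache, run func(*TxCache) error) error {
	txCache := NewTxCache(backend)
	if err := run(txCache); err != nil {
		return err // discard txCache; backend is untouched
	}
	txCache.Sync() // flush account and storage writes into the BlockCache
	return nil
}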
func (cache *TxCache) AddLog(log *vm.Log) {
cache.logs = append(cache.logs, log)
}
//-----------------------------------------------------------------------------
// Convenience function to return address of new contract
func NewContractAddress(caller []byte, nonce uint64) []byte {
temp := make([]byte, 32+8)
copy(temp, caller)
PutUint64(temp[32:], nonce)
return sha3.Sha3(temp)[:20]
}
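A new contract address is therefore just the first 20 bytes of sha3 over the caller's address followed by the creation nonce, so consecutive creations by the same caller get distinct but fully deterministic addresses. For example (sketch):

// Sketch: contract addresses are deterministic in (caller, nonce).
func exampleContractAddresses(caller []byte) ([]byte, []byte) {
	addr0 := NewContractAddress(caller, 0) // 20 bytes
	addr1 := NewContractAddress(caller, 1) // differs from addr0
	return addr0, addr1
}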
// Converts backend.Account to vm.Account struct.
func toVMAccount(acc *ac.Account) *vm.Account {
return &vm.Account{
Address: RightPadWord256(acc.Address),
Balance: acc.Balance,
Code: acc.Code, // This is crazy.
Nonce: uint64(acc.Sequence),
StorageRoot: RightPadWord256(acc.StorageRoot),
Other: acc.PubKey,
}
}
// Converts vm.Account to backend.Account struct.
func toStateAccount(acc *vm.Account) *ac.Account {
pubKey, ok := acc.Other.(ac.PubKey)
if !ok {
pubKey = ac.PubKeyNil{}
}
var storageRoot []byte
if acc.StorageRoot.IsZero() {
storageRoot = nil
} else {
storageRoot = acc.StorageRoot.Bytes()
}
return &ac.Account{
Address: acc.Address.Prefix(20),
PubKey: pubKey,
Balance: acc.Balance,
Code: acc.Code,
Sequence: uint(acc.Nonce),
StorageRoot: storageRoot,
}
}
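The two converters are roughly inverses: addresses are right-padded into a Word256 going into the VM and truncated back to 20 bytes coming out, while Sequence maps to Nonce. A quick round trip (illustration only):

// Sketch: basic fields survive a to-VM / from-VM conversion round trip.
func roundTripAccount(acc *ac.Account) *ac.Account {
	vmAcc := toVMAccount(acc)    // address right-padded to 32 bytes
	return toStateAccount(vmAcc) // address truncated back to 20 bytes
}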
type vmAccountInfo struct {
account *vm.Account
removed bool
}
func vmUnpack(accInfo vmAccountInfo) (*vm.Account, bool) {
return accInfo.account, accInfo.removed
}

View File

@ -2,12 +2,15 @@ package state
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"sort" "sort"
"strings" "strings"
"github.com/tendermint/tendermint/account"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/merkle" "github.com/tendermint/tendermint/merkle"
"github.com/tendermint/tendermint/types"
) )
// ValidatorSet represent a set of *Validator at a given height. // ValidatorSet represent a set of *Validator at a given height.
@ -198,6 +201,50 @@ func (valSet *ValidatorSet) Iterate(fn func(index uint, val *Validator) bool) {
} }
} }
// Verify that +2/3 of the set had signed the given signBytes
func (valSet *ValidatorSet) VerifyValidation(hash []byte, parts types.PartSetHeader, height uint, v *types.Validation) error {
if valSet.Size() != uint(len(v.Commits)) {
return errors.New(Fmt("Invalid validation -- wrong set size: %v vs %v",
valSet.Size(), len(v.Commits)))
}
talliedVotingPower := uint64(0)
seenValidators := map[string]struct{}{}
for idx, commit := range v.Commits {
// may be zero, in which case skip.
if commit.Signature.IsZero() {
continue
}
_, val := valSet.GetByIndex(uint(idx))
commitSignBytes := account.SignBytes(&types.Vote{
Height: height, Round: commit.Round, Type: types.VoteTypeCommit,
BlockHash: hash,
BlockParts: parts,
})
// Validate
if _, seen := seenValidators[string(val.Address)]; seen {
return Errorf("Duplicate validator for commit %v for Validation %v", commit, v)
}
if !val.PubKey.VerifyBytes(commitSignBytes, commit.Signature) {
return Errorf("Invalid signature for commit %v for Validation %v", commit, v)
}
// Tally
seenValidators[string(val.Address)] = struct{}{}
talliedVotingPower += val.VotingPower
}
if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
return nil
} else {
return Errorf("insufficient voting power %v, needed %v",
talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
}
}
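Since the commits in a block vote on the previous block, a caller would presumably verify block.Validation against the validator set bonded at the previous height, using that block's hash and part-set header (sketch):

// Sketch: check +2/3 commits for the previous block.
func checkBlockValidation(lastVals *ValidatorSet, block *types.Block) error {
	return lastVals.VerifyValidation(
		block.LastBlockHash, block.LastBlockParts,
		block.Height-1, block.Validation)
}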
func (valSet *ValidatorSet) String() string { func (valSet *ValidatorSet) String() string {
return valSet.StringIndented("") return valSet.StringIndented("")
} }

View File

@ -1,265 +0,0 @@
package state
import (
"bytes"
"sort"
ac "github.com/tendermint/tendermint/account"
"github.com/tendermint/tendermint/binary"
. "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/merkle"
"github.com/tendermint/tendermint/vm"
"github.com/tendermint/tendermint/vm/sha3"
)
// Converts state.Account to vm.Account struct.
func toVMAccount(acc *ac.Account) *vm.Account {
return &vm.Account{
Address: vm.BytesToWord(acc.Address),
Balance: acc.Balance,
Code: acc.Code, // This is crazy.
Nonce: uint64(acc.Sequence),
StorageRoot: vm.BytesToWord(acc.StorageRoot),
Other: acc.PubKey,
}
}
// Converts vm.Account to state.Account struct.
func toStateAccount(acc *vm.Account) *ac.Account {
pubKey, ok := acc.Other.(ac.PubKey)
if !ok {
pubKey = ac.PubKeyNil{}
}
var storageRoot []byte
if acc.StorageRoot.IsZero() {
storageRoot = nil
} else {
storageRoot = acc.StorageRoot.Bytes()
}
return &ac.Account{
Address: acc.Address.Address(),
PubKey: pubKey,
Balance: acc.Balance,
Code: acc.Code,
Sequence: uint(acc.Nonce),
StorageRoot: storageRoot,
}
}
//-----------------------------------------------------------------------------
type AccountInfo struct {
account *vm.Account
deleted bool
}
type VMAppState struct {
state *State
accounts map[string]AccountInfo
storage map[string]vm.Word
logs []*vm.Log
}
func NewVMAppState(state *State) *VMAppState {
return &VMAppState{
state: state,
accounts: make(map[string]AccountInfo),
storage: make(map[string]vm.Word),
logs: make([]*vm.Log, 0),
}
}
func unpack(accInfo AccountInfo) (*vm.Account, bool) {
return accInfo.account, accInfo.deleted
}
func (vas *VMAppState) GetAccount(addr vm.Word) (*vm.Account, error) {
account, deleted := unpack(vas.accounts[addr.String()])
if deleted {
return nil, Errorf("Account was deleted: %X", addr)
} else if account != nil {
return account, nil
} else {
acc := vas.state.GetAccount(addr.Address())
if acc == nil {
return nil, Errorf("Invalid account addr: %X", addr)
}
return toVMAccount(acc), nil
}
}
func (vas *VMAppState) UpdateAccount(account *vm.Account) error {
accountInfo, ok := vas.accounts[account.Address.String()]
if !ok {
vas.accounts[account.Address.String()] = AccountInfo{account, false}
return nil
}
account, deleted := unpack(accountInfo)
if deleted {
return Errorf("Account was deleted: %X", account.Address)
} else {
vas.accounts[account.Address.String()] = AccountInfo{account, false}
return nil
}
}
func (vas *VMAppState) DeleteAccount(account *vm.Account) error {
accountInfo, ok := vas.accounts[account.Address.String()]
if !ok {
vas.accounts[account.Address.String()] = AccountInfo{account, true}
return nil
}
account, deleted := unpack(accountInfo)
if deleted {
return Errorf("Account was already deleted: %X", account.Address)
} else {
vas.accounts[account.Address.String()] = AccountInfo{account, true}
return nil
}
}
// Creates a 20 byte address and bumps the creator's nonce.
func (vas *VMAppState) CreateAccount(creator *vm.Account) (*vm.Account, error) {
// Generate an address
nonce := creator.Nonce
creator.Nonce += 1
addr := vm.RightPadWord(NewContractAddress(creator.Address.Address(), nonce))
// Create account from address.
account, deleted := unpack(vas.accounts[addr.String()])
if deleted || account == nil {
account = &vm.Account{
Address: addr,
Balance: 0,
Code: nil,
Nonce: 0,
StorageRoot: vm.Zero,
}
vas.accounts[addr.String()] = AccountInfo{account, false}
return account, nil
} else {
panic(Fmt("Could not create account, address already exists: %X", addr))
// return nil, Errorf("Account already exists: %X", addr)
}
}
func (vas *VMAppState) GetStorage(addr vm.Word, key vm.Word) (vm.Word, error) {
account, deleted := unpack(vas.accounts[addr.String()])
if account == nil {
return vm.Zero, Errorf("Invalid account addr: %X", addr)
} else if deleted {
return vm.Zero, Errorf("Account was deleted: %X", addr)
}
value, ok := vas.storage[addr.String()+key.String()]
if ok {
return value, nil
} else {
return vm.Zero, nil
}
}
// NOTE: Set value to zero to delete from the trie.
func (vas *VMAppState) SetStorage(addr vm.Word, key vm.Word, value vm.Word) (bool, error) {
account, deleted := unpack(vas.accounts[addr.String()])
if account == nil {
return false, Errorf("Invalid account addr: %X", addr)
} else if deleted {
return false, Errorf("Account was deleted: %X", addr)
}
_, ok := vas.storage[addr.String()+key.String()]
vas.storage[addr.String()+key.String()] = value
return ok, nil
}
// CONTRACT the updates are in deterministic order.
func (vas *VMAppState) Sync() {
// Determine order for accounts
addrStrs := []string{}
for addrStr := range vas.accounts {
addrStrs = append(addrStrs, addrStr)
}
sort.Strings(addrStrs)
// Update or delete accounts.
for _, addrStr := range addrStrs {
account, deleted := unpack(vas.accounts[addrStr])
if deleted {
removed := vas.state.RemoveAccount(account.Address.Address())
if !removed {
panic(Fmt("Could not remove account to be deleted: %X", account.Address))
}
} else {
if account == nil {
panic(Fmt("Account should not be nil for addr: %X", account.Address))
}
vas.state.UpdateAccount(toStateAccount(account))
}
}
// Determine order for storage updates
// The address comes first so it'll be grouped.
storageKeyStrs := []string{}
for keyStr := range vas.storage {
storageKeyStrs = append(storageKeyStrs, keyStr)
}
sort.Strings(storageKeyStrs)
// Update storage for all account/key.
storage := merkle.NewIAVLTree(
binary.BasicCodec, // TODO change
binary.BasicCodec, // TODO change
1024, // TODO change.
vas.state.DB,
)
var currentAccount *vm.Account
var deleted bool
for _, storageKey := range storageKeyStrs {
value := vas.storage[storageKey]
addrKeyBytes := []byte(storageKey)
addr := addrKeyBytes[:32]
key := addrKeyBytes[32:]
if currentAccount == nil || !bytes.Equal(currentAccount.Address[:], addr) {
currentAccount, deleted = unpack(vas.accounts[string(addr)])
if deleted {
continue
}
var storageRoot []byte
if currentAccount.StorageRoot.IsZero() {
storageRoot = nil
} else {
storageRoot = currentAccount.StorageRoot.Bytes()
}
storage.Load(storageRoot)
}
if value.IsZero() {
_, removed := storage.Remove(key)
if !removed {
panic(Fmt("Storage could not be removed for addr: %X @ %X", addr, key))
}
} else {
storage.Set(key, value)
}
}
// TODO support logs, add them to the state somehow.
}
func (vas *VMAppState) AddLog(log *vm.Log) {
vas.logs = append(vas.logs, log)
}
//-----------------------------------------------------------------------------
// Convenience function to return address of new contract
func NewContractAddress(caller []byte, nonce uint64) []byte {
temp := make([]byte, 32+8)
copy(temp, caller)
vm.PutUint64(temp[32:], nonce)
return sha3.Sha3(temp)[:20]
}

View File

@ -39,7 +39,9 @@ func (b *Block) ValidateBasic(lastBlockHeight uint, lastBlockHash []byte,
if !b.LastBlockParts.Equals(lastBlockParts) { if !b.LastBlockParts.Equals(lastBlockParts) {
return errors.New("Wrong Block.Header.LastBlockParts") return errors.New("Wrong Block.Header.LastBlockParts")
} }
/* TODO: Determine bounds. /* TODO: Determine bounds
See blockchain/reactor "stopSyncingDurationMinutes"
if !b.Time.After(lastBlockTime) { if !b.Time.After(lastBlockTime) {
return errors.New("Invalid Block.Header.Time") return errors.New("Invalid Block.Header.Time")
} }
@ -53,19 +55,31 @@ func (b *Block) ValidateBasic(lastBlockHeight uint, lastBlockHash []byte,
return nil return nil
} }
// Computes and returns the block hash.
// If the block is incomplete (e.g. missing Header.StateHash)
// then the hash is nil, to prevent the usage of that hash.
func (b *Block) Hash() []byte { func (b *Block) Hash() []byte {
if b.Header == nil || b.Validation == nil || b.Data == nil { if b.Header == nil || b.Validation == nil || b.Data == nil {
return nil return nil
} }
hashes := [][]byte{ hashHeader := b.Header.Hash()
b.Header.Hash(), hashValidation := b.Validation.Hash()
b.Validation.Hash(), hashData := b.Data.Hash()
b.Data.Hash(),
// If hashHeader is nil, required fields are missing.
if len(hashHeader) == 0 {
return nil
} }
// Merkle hash from sub-hashes.
// Merkle hash from subhashes.
hashes := [][]byte{hashHeader, hashValidation, hashData}
return merkle.HashFromHashes(hashes) return merkle.HashFromHashes(hashes)
} }
func (b *Block) MakePartSet() *PartSet {
return NewPartSetFromData(binary.BinaryBytes(b))
}
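MakePartSet replaces the NewPartSetFromData(binary.BinaryBytes(block)) calls seen in the state tests above; the resulting part-set header is what gets gossiped and handed to block execution. For example (sketch):

// Sketch: derive the PartSetHeader that accompanies a block.
func partSetHeaderFor(b *Block) PartSetHeader {
	parts := b.MakePartSet()
	return parts.Header()
}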
// Convenience. // Convenience.
// A nil block never hashes to anything. // A nil block never hashes to anything.
// Nothing hashes to a nil hash. // Nothing hashes to a nil hash.
@ -119,7 +133,12 @@ type Header struct {
StateHash []byte StateHash []byte
} }
// NOTE: hash is nil if required fields are missing.
func (h *Header) Hash() []byte { func (h *Header) Hash() []byte {
if len(h.StateHash) == 0 {
return nil
}
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
hasher, n, err := sha256.New(), new(int64), new(error) hasher, n, err := sha256.New(), new(int64), new(error)
binary.WriteBinary(h, buf, n, err) binary.WriteBinary(h, buf, n, err)

types/block_meta.go Normal file
View File

@ -0,0 +1,15 @@
package types
type BlockMeta struct {
Hash []byte // The block hash
Header *Header // The block's Header
Parts PartSetHeader // The PartSetHeader, for transfer
}
func NewBlockMeta(block *Block, blockParts *PartSet) *BlockMeta {
return &BlockMeta{
Hash: block.Hash(),
Header: block.Header,
Parts: blockParts.Header(),
}
}
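BlockMeta is the lightweight per-height record the block store keeps so headers and part-set info can be served without loading the full block; it is built from a complete block and its parts (sketch, mirroring the former makeBlockMeta below):

// Sketch: meta.Hash == block.Hash(), meta.Parts == parts.Header().
func metaFor(block *Block, parts *PartSet) *BlockMeta {
	return NewBlockMeta(block, parts)
}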

View File

@ -1,247 +0,0 @@
package types
import (
"bytes"
"encoding/json"
"fmt"
"io"
"github.com/tendermint/tendermint/binary"
. "github.com/tendermint/tendermint/common"
dbm "github.com/tendermint/tendermint/db"
)
/*
Simple low level store for blocks.
There are three types of information stored:
- BlockMeta: Meta information about each block
- Block part: Parts of each block, aggregated w/ PartSet
- Validation: The Validation part of each block, for gossiping commit votes
Currently the commit signatures are duplicated in the Block parts as
well as the Validation. In the future this may change, perhaps by moving
the Validation data outside the Block.
*/
type BlockStore struct {
height uint
db dbm.DB
}
func NewBlockStore(db dbm.DB) *BlockStore {
bsjson := LoadBlockStoreStateJSON(db)
return &BlockStore{
height: bsjson.Height,
db: db,
}
}
// Height() returns the last known contiguous block height.
func (bs *BlockStore) Height() uint {
return bs.height
}
func (bs *BlockStore) GetReader(key []byte) io.Reader {
bytez := bs.db.Get(key)
if bytez == nil {
return nil
}
return bytes.NewReader(bytez)
}
func (bs *BlockStore) LoadBlock(height uint) *Block {
var n int64
var err error
r := bs.GetReader(calcBlockMetaKey(height))
if r == nil {
panic(Fmt("Block does not exist at height %v", height))
}
meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta)
if err != nil {
panic(Fmt("Error reading block meta: %v", err))
}
bytez := []byte{}
for i := uint(0); i < meta.Parts.Total; i++ {
part := bs.LoadBlockPart(height, i)
bytez = append(bytez, part.Bytes...)
}
block := binary.ReadBinary(&Block{}, bytes.NewReader(bytez), &n, &err).(*Block)
if err != nil {
panic(Fmt("Error reading block: %v", err))
}
return block
}
func (bs *BlockStore) LoadBlockPart(height uint, index uint) *Part {
var n int64
var err error
r := bs.GetReader(calcBlockPartKey(height, index))
if r == nil {
panic(Fmt("BlockPart does not exist for height %v index %v", height, index))
}
part := binary.ReadBinary(&Part{}, r, &n, &err).(*Part)
if err != nil {
panic(Fmt("Error reading block part: %v", err))
}
return part
}
func (bs *BlockStore) LoadBlockMeta(height uint) *BlockMeta {
var n int64
var err error
r := bs.GetReader(calcBlockMetaKey(height))
if r == nil {
panic(Fmt("BlockMeta does not exist for height %v", height))
}
meta := binary.ReadBinary(&BlockMeta{}, r, &n, &err).(*BlockMeta)
if err != nil {
panic(Fmt("Error reading block meta: %v", err))
}
return meta
}
// NOTE: the Commit-vote heights are for the block at `height-1`
// Since these are included in the subsequent block, the height
// is off by 1.
func (bs *BlockStore) LoadBlockValidation(height uint) *Validation {
var n int64
var err error
r := bs.GetReader(calcBlockValidationKey(height))
if r == nil {
panic(Fmt("BlockValidation does not exist for height %v", height))
}
validation := binary.ReadBinary(&Validation{}, r, &n, &err).(*Validation)
if err != nil {
panic(Fmt("Error reading validation: %v", err))
}
return validation
}
// NOTE: the Commit-vote heights are for the block at `height`
func (bs *BlockStore) LoadSeenValidation(height uint) *Validation {
var n int64
var err error
r := bs.GetReader(calcSeenValidationKey(height))
if r == nil {
panic(Fmt("SeenValidation does not exist for height %v", height))
}
validation := binary.ReadBinary(&Validation{}, r, &n, &err).(*Validation)
if err != nil {
panic(Fmt("Error reading validation: %v", err))
}
return validation
}
// blockParts: Must be parts of the block
// seenValidation: The +2/3 commits that were seen which finalized the height.
// If all the nodes restart after committing a block,
// we need this to reload the commits to catch-up nodes to the
// most recent height. Otherwise they'd stall at H-1.
// Also good to have to debug consensus issues & punish wrong-signers
// whose commits weren't included in the block.
func (bs *BlockStore) SaveBlock(block *Block, blockParts *PartSet, seenValidation *Validation) {
height := block.Height
if height != bs.height+1 {
panic(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
}
if !blockParts.IsComplete() {
panic(Fmt("BlockStore can only save complete block part sets"))
}
// Save block meta
meta := makeBlockMeta(block, blockParts)
metaBytes := binary.BinaryBytes(meta)
bs.db.Set(calcBlockMetaKey(height), metaBytes)
// Save block parts
for i := uint(0); i < blockParts.Total(); i++ {
bs.saveBlockPart(height, i, blockParts.GetPart(i))
}
// Save block validation (duplicate and separate from the Block)
blockValidationBytes := binary.BinaryBytes(block.Validation)
bs.db.Set(calcBlockValidationKey(height), blockValidationBytes)
// Save seen validation (seen +2/3 commits)
seenValidationBytes := binary.BinaryBytes(seenValidation)
bs.db.Set(calcSeenValidationKey(height), seenValidationBytes)
// Save new BlockStoreStateJSON descriptor
BlockStoreStateJSON{Height: height}.Save(bs.db)
// Done!
bs.height = height
}
func (bs *BlockStore) saveBlockPart(height uint, index uint, part *Part) {
if height != bs.height+1 {
panic(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
}
partBytes := binary.BinaryBytes(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
}
//-----------------------------------------------------------------------------
type BlockMeta struct {
Hash []byte // The block hash
Header *Header // The block's Header
Parts PartSetHeader // The PartSetHeader, for transfer
}
func makeBlockMeta(block *Block, blockParts *PartSet) *BlockMeta {
return &BlockMeta{
Hash: block.Hash(),
Header: block.Header,
Parts: blockParts.Header(),
}
}
//-----------------------------------------------------------------------------
func calcBlockMetaKey(height uint) []byte {
return []byte(fmt.Sprintf("H:%v", height))
}
func calcBlockPartKey(height uint, partIndex uint) []byte {
return []byte(fmt.Sprintf("P:%v:%v", height, partIndex))
}
func calcBlockValidationKey(height uint) []byte {
return []byte(fmt.Sprintf("V:%v", height))
}
func calcSeenValidationKey(height uint) []byte {
return []byte(fmt.Sprintf("SV:%v", height))
}
//-----------------------------------------------------------------------------
var blockStoreKey = []byte("blockStore")
type BlockStoreStateJSON struct {
Height uint
}
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := json.Marshal(bsj)
if err != nil {
panic(Fmt("Could not marshal state bytes: %v", err))
}
db.Set(blockStoreKey, bytes)
}
func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
bytes := db.Get(blockStoreKey)
if bytes == nil {
return BlockStoreStateJSON{
Height: 0,
}
}
bsj := BlockStoreStateJSON{}
err := json.Unmarshal(bytes, &bsj)
if err != nil {
panic(Fmt("Could not unmarshal bytes: %X", bytes))
}
return bsj
}

View File

@ -1,35 +0,0 @@
package vm
import (
"encoding/binary"
)
func Uint64ToWord(i uint64) Word {
word := Word{}
PutUint64(word[:], i)
return word
}
func BytesToWord(bz []byte) Word {
word := Word{}
copy(word[:], bz)
return word
}
func LeftPadWord(bz []byte) (word Word) {
copy(word[32-len(bz):], bz)
return
}
func RightPadWord(bz []byte) (word Word) {
copy(word[:], bz)
return
}
func GetUint64(word Word) uint64 {
return binary.LittleEndian.Uint64(word[:])
}
func PutUint64(dest []byte, i uint64) {
binary.LittleEndian.PutUint64(dest, i)
}

View File

@ -3,7 +3,6 @@ package vm
const ( const (
GasSha3 uint64 = 1 GasSha3 uint64 = 1
GasGetAccount uint64 = 1 GasGetAccount uint64 = 1
GasStorageCreate uint64 = 1
GasStorageUpdate uint64 = 1 GasStorageUpdate uint64 = 1
GasStackOp uint64 = 1 GasStackOp uint64 = 1

View File

@ -3,19 +3,18 @@ package vm
import ( import (
"code.google.com/p/go.crypto/ripemd160" "code.google.com/p/go.crypto/ripemd160"
"crypto/sha256" "crypto/sha256"
. "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/vm/secp256k1" "github.com/tendermint/tendermint/vm/secp256k1"
"github.com/tendermint/tendermint/vm/sha3" "github.com/tendermint/tendermint/vm/sha3"
. "github.com/tendermint/tendermint/common"
) )
var nativeContracts = make(map[Word]NativeContract) var nativeContracts = make(map[Word256]NativeContract)
func init() { func init() {
nativeContracts[Uint64ToWord(1)] = ecrecoverFunc nativeContracts[Uint64ToWord256(1)] = ecrecoverFunc
nativeContracts[Uint64ToWord(2)] = sha256Func nativeContracts[Uint64ToWord256(2)] = sha256Func
nativeContracts[Uint64ToWord(3)] = ripemd160Func nativeContracts[Uint64ToWord256(3)] = ripemd160Func
nativeContracts[Uint64ToWord(4)] = identityFunc nativeContracts[Uint64ToWord256(4)] = identityFunc
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------

View File

@ -2,11 +2,12 @@ package vm
import ( import (
"fmt" "fmt"
. "github.com/tendermint/tendermint/common"
) )
// Not goroutine safe // Not goroutine safe
type Stack struct { type Stack struct {
data []Word data []Word256
ptr int ptr int
gas *uint64 gas *uint64
@ -15,7 +16,7 @@ type Stack struct {
func NewStack(capacity int, gas *uint64, err *error) *Stack { func NewStack(capacity int, gas *uint64, err *error) *Stack {
return &Stack{ return &Stack{
data: make([]Word, capacity), data: make([]Word256, capacity),
ptr: 0, ptr: 0,
gas: gas, gas: gas,
err: err, err: err,
@ -36,7 +37,7 @@ func (st *Stack) setErr(err error) {
} }
} }
func (st *Stack) Push(d Word) { func (st *Stack) Push(d Word256) {
st.useGas(GasStackOp) st.useGas(GasStackOp)
if st.ptr == cap(st.data) { if st.ptr == cap(st.data) {
st.setErr(ErrDataStackOverflow) st.setErr(ErrDataStackOverflow)
@ -50,18 +51,18 @@ func (st *Stack) PushBytes(bz []byte) {
if len(bz) != 32 { if len(bz) != 32 {
panic("Invalid bytes size: expected 32") panic("Invalid bytes size: expected 32")
} }
st.Push(BytesToWord(bz)) st.Push(RightPadWord256(bz))
} }
func (st *Stack) Push64(i uint64) { func (st *Stack) Push64(i uint64) {
st.Push(Uint64ToWord(i)) st.Push(Uint64ToWord256(i))
} }
func (st *Stack) Pop() Word { func (st *Stack) Pop() Word256 {
st.useGas(GasStackOp) st.useGas(GasStackOp)
if st.ptr == 0 { if st.ptr == 0 {
st.setErr(ErrDataStackUnderflow) st.setErr(ErrDataStackUnderflow)
return Zero return Zero256
} }
st.ptr-- st.ptr--
return st.data[st.ptr] return st.data[st.ptr]
@ -72,7 +73,7 @@ func (st *Stack) PopBytes() []byte {
} }
func (st *Stack) Pop64() uint64 { func (st *Stack) Pop64() uint64 {
return GetUint64(st.Pop()) return GetUint64(st.Pop().Bytes())
} }
func (st *Stack) Len() int { func (st *Stack) Len() int {
@ -100,7 +101,7 @@ func (st *Stack) Dup(n int) {
} }
// Not an opcode, costs no gas. // Not an opcode, costs no gas.
func (st *Stack) Peek() Word { func (st *Stack) Peek() Word256 {
return st.data[st.ptr-1] return st.data[st.ptr-1]
} }
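With Word replaced by common.Word256, 64-bit values round-trip through the stack via Uint64ToWord256 on push and GetUint64 on the popped word's bytes. A tiny illustration (sketch; gas and error plumbing as in NewStack above):

// Sketch: push a uint64 and pop it back from a fresh stack.
func stackRoundTrip(i uint64) uint64 {
	var gas uint64 = 100
	var err error
	st := NewStack(16, &gas, &err)
	st.Push64(i)      // stored as a Word256
	return st.Pop64() // equals i, assuming no stack error was set
}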

View File

@ -1,8 +1,6 @@
package main package vm
import ( import (
"fmt"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
. "github.com/tendermint/tendermint/vm" . "github.com/tendermint/tendermint/vm"
"github.com/tendermint/tendermint/vm/sha3" "github.com/tendermint/tendermint/vm/sha3"
@ -10,41 +8,39 @@ import (
type FakeAppState struct { type FakeAppState struct {
accounts map[string]*Account accounts map[string]*Account
storage map[string]Word storage map[string]Word256
logs []*Log logs []*Log
} }
func (fas *FakeAppState) GetAccount(addr Word) (*Account, error) { func (fas *FakeAppState) GetAccount(addr Word256) *Account {
account := fas.accounts[addr.String()] account := fas.accounts[addr.String()]
if account != nil { if account != nil {
return account, nil return account
} else { } else {
return nil, Errorf("Invalid account addr: %v", addr) panic(Fmt("Invalid account addr: %X", addr))
} }
} }
func (fas *FakeAppState) UpdateAccount(account *Account) error { func (fas *FakeAppState) UpdateAccount(account *Account) {
_, ok := fas.accounts[account.Address.String()] _, ok := fas.accounts[account.Address.String()]
if !ok { if !ok {
return Errorf("Invalid account addr: %v", account.Address.String()) panic(Fmt("Invalid account addr: %X", account.Address))
} else { } else {
// Nothing to do // Nothing to do
return nil
} }
} }
func (fas *FakeAppState) DeleteAccount(account *Account) error { func (fas *FakeAppState) RemoveAccount(account *Account) {
_, ok := fas.accounts[account.Address.String()] _, ok := fas.accounts[account.Address.String()]
if !ok { if !ok {
return Errorf("Invalid account addr: %v", account.Address.String()) panic(Fmt("Invalid account addr: %X", account.Address))
} else { } else {
// Delete account // Remove account
delete(fas.accounts, account.Address.String()) delete(fas.accounts, account.Address.String())
return nil
} }
} }
func (fas *FakeAppState) CreateAccount(creator *Account) (*Account, error) { func (fas *FakeAppState) CreateAccount(creator *Account) *Account {
addr := createAddress(creator) addr := createAddress(creator)
account := fas.accounts[addr.String()] account := fas.accounts[addr.String()]
if account == nil { if account == nil {
@ -53,75 +49,46 @@ func (fas *FakeAppState) CreateAccount(creator *Account) (*Account, error) {
Balance: 0, Balance: 0,
Code: nil, Code: nil,
Nonce: 0, Nonce: 0,
StorageRoot: Zero, StorageRoot: Zero256,
}, nil }
} else { } else {
return nil, Errorf("Invalid account addr: %v", addr) panic(Fmt("Invalid account addr: %X", addr))
} }
} }
func (fas *FakeAppState) GetStorage(addr Word, key Word) (Word, error) { func (fas *FakeAppState) GetStorage(addr Word256, key Word256) Word256 {
_, ok := fas.accounts[addr.String()] _, ok := fas.accounts[addr.String()]
if !ok { if !ok {
return Zero, Errorf("Invalid account addr: %v", addr) panic(Fmt("Invalid account addr: %X", addr))
} }
value, ok := fas.storage[addr.String()+key.String()] value, ok := fas.storage[addr.String()+key.String()]
if ok { if ok {
return value, nil return value
} else { } else {
return Zero, nil return Zero256
} }
} }
func (fas *FakeAppState) SetStorage(addr Word, key Word, value Word) (bool, error) { func (fas *FakeAppState) SetStorage(addr Word256, key Word256, value Word256) {
_, ok := fas.accounts[addr.String()] _, ok := fas.accounts[addr.String()]
if !ok { if !ok {
return false, Errorf("Invalid account addr: %v", addr) panic(Fmt("Invalid account addr: %X", addr))
} }
_, ok = fas.storage[addr.String()+key.String()]
fas.storage[addr.String()+key.String()] = value fas.storage[addr.String()+key.String()] = value
return ok, nil
} }
func (fas *FakeAppState) AddLog(log *Log) { func (fas *FakeAppState) AddLog(log *Log) {
fas.logs = append(fas.logs, log) fas.logs = append(fas.logs, log)
} }
func main() {
appState := &FakeAppState{
accounts: make(map[string]*Account),
storage: make(map[string]Word),
logs: nil,
}
params := Params{
BlockHeight: 0,
BlockHash: Zero,
BlockTime: 0,
GasLimit: 0,
}
ourVm := NewVM(appState, params, Zero)
// Create accounts
account1 := &Account{
Address: Uint64ToWord(100),
}
account2 := &Account{
Address: Uint64ToWord(101),
}
var gas uint64 = 1000
output, err := ourVm.Call(account1, account2, []byte{0x5B, 0x60, 0x00, 0x56}, []byte{}, 0, &gas)
fmt.Printf("Output: %v Error: %v\n", output, err)
}
// Creates a 20 byte address and bumps the nonce. // Creates a 20 byte address and bumps the nonce.
func createAddress(creator *Account) Word { func createAddress(creator *Account) Word256 {
nonce := creator.Nonce nonce := creator.Nonce
creator.Nonce += 1 creator.Nonce += 1
temp := make([]byte, 32+8) temp := make([]byte, 32+8)
copy(temp, creator.Address[:]) copy(temp, creator.Address[:])
PutUint64(temp[32:], nonce) PutUint64(temp[32:], nonce)
return RightPadWord(sha3.Sha3(temp)[:20]) return RightPadWord256(sha3.Sha3(temp)[:20])
} }

vm/test/vm_test.go Normal file
View File

@ -0,0 +1,99 @@
package vm
import (
"crypto/rand"
"encoding/hex"
"fmt"
"strings"
"testing"
"time"
. "github.com/tendermint/tendermint/common"
. "github.com/tendermint/tendermint/vm"
)
func newAppState() *FakeAppState {
return &FakeAppState{
accounts: make(map[string]*Account),
storage: make(map[string]Word256),
logs: nil,
}
}
func newParams() Params {
return Params{
BlockHeight: 0,
BlockHash: Zero256,
BlockTime: 0,
GasLimit: 0,
}
}
func makeBytes(n int) []byte {
b := make([]byte, n)
rand.Read(b)
return b
}
func TestVM(t *testing.T) {
ourVm := NewVM(newAppState(), newParams(), Zero256)
// Create accounts
account1 := &Account{
Address: Uint64ToWord256(100),
}
account2 := &Account{
Address: Uint64ToWord256(101),
}
var gas uint64 = 1000
N := []byte{0xff, 0xff}
// Loop N times
code := []byte{0x60, 0x00, 0x60, 0x20, 0x52, 0x5B, byte(0x60 + len(N) - 1)}
for i := 0; i < len(N); i++ {
code = append(code, N[i])
}
code = append(code, []byte{0x60, 0x20, 0x51, 0x12, 0x15, 0x60, byte(0x1b + len(N)), 0x57, 0x60, 0x01, 0x60, 0x20, 0x51, 0x01, 0x60, 0x20, 0x52, 0x60, 0x05, 0x56, 0x5B}...)
start := time.Now()
output, err := ourVm.Call(account1, account2, code, []byte{}, 0, &gas)
fmt.Printf("Output: %v Error: %v\n", output, err)
fmt.Println("Call took:", time.Since(start))
}
func TestSubcurrency(t *testing.T) {
st := newAppState()
// Create accounts
account1 := &Account{
Address: RightPadWord256(makeBytes(20)),
}
account2 := &Account{
Address: RightPadWord256(makeBytes(20)),
}
st.accounts[account1.Address.String()] = account1
st.accounts[account2.Address.String()] = account2
ourVm := NewVM(st, newParams(), Zero256)
var gas uint64 = 1000
code_parts := []string{"620f42403355",
"7c0100000000000000000000000000000000000000000000000000000000",
"600035046315cf268481141561004657",
"6004356040526040515460605260206060f35b63693200ce81141561008757",
"60043560805260243560a052335460c0523360e05260a05160c05112151561008657",
"60a05160c0510360e0515560a0516080515401608051555b5b505b6000f3"}
code, _ := hex.DecodeString(strings.Join(code_parts, ""))
fmt.Printf("Code: %x\n", code)
data, _ := hex.DecodeString("693200CE0000000000000000000000004B4363CDE27C2EB05E66357DB05BC5C88F850C1A0000000000000000000000000000000000000000000000000000000000000005")
output, err := ourVm.Call(account1, account2, code, data, 0, &gas)
fmt.Printf("Output: %v Error: %v\n", output, err)
}
/*
// infinite loop
code := []byte{0x5B, 0x60, 0x00, 0x56}
// mstore
code := []byte{0x60, 0x00, 0x60, 0x20}
// mstore, mload
code := []byte{0x60, 0x01, 0x60, 0x20, 0x52, 0x60, 0x20, 0x51}
*/
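For readers decoding TestVM above: the byte slice it assembles is a simple counting loop. The annotated listing below spells out the intent of each byte, assuming the standard EVM opcode encodings (PUSH1 = 0x60, MSTORE = 0x52, MLOAD = 0x51, SLT = 0x12, ISZERO = 0x15, JUMPI = 0x57, JUMP = 0x56, JUMPDEST = 0x5B); the offsets shown are for the two-byte N the test uses.

// Illustrative only: the loop TestVM builds, with mnemonics as comments.
var loopCode = []byte{
	0x60, 0x00,       // PUSH1 0x00            -- initial counter
	0x60, 0x20,       // PUSH1 0x20            -- memory offset 0x20
	0x52,             // MSTORE                -- mem[0x20] = 0
	0x5B,             // JUMPDEST  (pc = 0x05) -- loop head
	0x61, 0xff, 0xff, // PUSH2 0xffff          -- N, the iteration bound
	0x60, 0x20,       // PUSH1 0x20
	0x51,             // MLOAD                 -- load the counter
	0x12,             // SLT                   -- counter < N ?
	0x15,             // ISZERO                -- true once counter >= N
	0x60, 0x1d,       // PUSH1 0x1d            -- exit JUMPDEST (0x1b + len(N))
	0x57,             // JUMPI                 -- leave the loop when done
	0x60, 0x01,       // PUSH1 0x01
	0x60, 0x20,       // PUSH1 0x20
	0x51,             // MLOAD
	0x01,             // ADD                   -- counter + 1
	0x60, 0x20,       // PUSH1 0x20
	0x52,             // MSTORE                -- store the incremented counter
	0x60, 0x05,       // PUSH1 0x05            -- back to the loop head
	0x56,             // JUMP
	0x5B,             // JUMPDEST  (pc = 0x1d) -- exit
}

TestSubcurrency exercises the call interface the same way: its data blob is a 4-byte function selector (693200ce) followed by two 32-byte arguments, a recipient address left-padded to 32 bytes and the amount 5, which the contract code reads back with CALLDATALOAD at offsets 4 and 36.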

vm/types.go

@ -1,44 +1,25 @@
package vm package vm
import () import (
. "github.com/tendermint/tendermint/common"
)
const ( const (
defaultDataStackCapacity = 10 defaultDataStackCapacity = 10
) )
var (
Zero = Word{0}
One = Word{1}
)
type Word [32]byte
func (w Word) String() string { return string(w[:]) }
func (w Word) Copy() Word { return w }
func (w Word) Bytes() []byte { return w[:] } // copied.
func (w Word) Address() []byte { return w[:20] }
func (w Word) IsZero() bool {
accum := byte(0)
for _, byt := range w {
accum |= byt
}
return accum == 0
}
//-----------------------------------------------------------------------------
type Account struct { type Account struct {
Address Word Address Word256
Balance uint64 Balance uint64
Code []byte Code []byte
Nonce uint64 Nonce uint64
StorageRoot Word StorageRoot Word256
Other interface{} // For holding all other data. Other interface{} // For holding all other data.
} }
type Log struct { type Log struct {
Address Word Address Word256
Topics []Word Topics []Word256
Data []byte Data []byte
Height uint64 Height uint64
} }
@ -46,14 +27,14 @@ type Log struct {
type AppState interface { type AppState interface {
// Accounts // Accounts
GetAccount(addr Word) (*Account, error) GetAccount(addr Word256) *Account
UpdateAccount(*Account) error UpdateAccount(*Account)
DeleteAccount(*Account) error RemoveAccount(*Account)
CreateAccount(*Account) (*Account, error) CreateAccount(*Account) *Account
// Storage // Storage
GetStorage(Word, Word) (Word, error) GetStorage(Word256, Word256) Word256
SetStorage(Word, Word, Word) (bool, error) // Setting to Zero is deleting. SetStorage(Word256, Word256, Word256) // Setting to Zero is deleting.
// Logs // Logs
AddLog(*Log) AddLog(*Log)
@ -61,7 +42,7 @@ type AppState interface {
type Params struct { type Params struct {
BlockHeight uint64 BlockHeight uint64
BlockHash Word BlockHash Word256
BlockTime int64 BlockTime int64
GasLimit uint64 GasLimit uint64
} }
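The interface change above drops the error returns: a missing account is now reported by GetAccount returning nil, and SetStorage no longer reports whether a slot was created. Below is a minimal in-memory sketch of the new shape, laid out like the test helpers (package vm in vm/test, dot-importing the vm package) and mirroring the FakeAppState used by the tests; memAppState is only an illustrative name, and CreateAccount reuses the createAddress helper shown earlier.

package vm

import (
	. "github.com/tendermint/tendermint/common"
	. "github.com/tendermint/tendermint/vm"
)

// memAppState is an illustrative in-memory AppState: lookups return nil for
// unknown addresses instead of an error, and storage writes always succeed.
type memAppState struct {
	accounts map[string]*Account
	storage  map[string]Word256
	logs     []*Log
}

func (st *memAppState) GetAccount(addr Word256) *Account { return st.accounts[addr.String()] }

func (st *memAppState) UpdateAccount(acc *Account) { st.accounts[acc.Address.String()] = acc }

func (st *memAppState) RemoveAccount(acc *Account) { delete(st.accounts, acc.Address.String()) }

func (st *memAppState) CreateAccount(creator *Account) *Account {
	acc := &Account{Address: createAddress(creator)} // bumps the creator's nonce
	st.accounts[acc.Address.String()] = acc
	return acc
}

func (st *memAppState) GetStorage(addr, key Word256) Word256 {
	return st.storage[addr.String()+key.String()]
}

func (st *memAppState) SetStorage(addr, key, value Word256) {
	st.storage[addr.String()+key.String()] = value // Zero256 value means "deleted"
}

func (st *memAppState) AddLog(log *Log) { st.logs = append(st.logs, log) }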

371
vm/vm.go

@ -3,13 +3,14 @@ package vm
import ( import (
"errors" "errors"
"fmt" "fmt"
"math" "math/big"
. "github.com/tendermint/tendermint/common" . "github.com/tendermint/tendermint/common"
"github.com/tendermint/tendermint/vm/sha3" "github.com/tendermint/tendermint/vm/sha3"
) )
var ( var (
ErrUnknownAddress = errors.New("Unknown address")
ErrInsufficientBalance = errors.New("Insufficient balance") ErrInsufficientBalance = errors.New("Insufficient balance")
ErrInvalidJumpDest = errors.New("Invalid jump dest") ErrInvalidJumpDest = errors.New("Invalid jump dest")
ErrInsufficientGas = errors.New("Insuffient gas") ErrInsufficientGas = errors.New("Insuffient gas")
@ -23,21 +24,30 @@ var (
ErrInvalidContract = errors.New("Invalid contract") ErrInvalidContract = errors.New("Invalid contract")
) )
type Debug bool
const ( const (
dataStackCapacity = 1024 dataStackCapacity = 1024
callStackCapacity = 100 // TODO ensure usage. callStackCapacity = 100 // TODO ensure usage.
memoryCapacity = 1024 * 1024 // 1 MB memoryCapacity = 1024 * 1024 // 1 MB
dbg Debug = true
) )
func (d Debug) Printf(s string, a ...interface{}) {
if d {
fmt.Printf(s, a...)
}
}
type VM struct { type VM struct {
appState AppState appState AppState
params Params params Params
origin Word origin Word256
callDepth int callDepth int
} }
func NewVM(appState AppState, params Params, origin Word) *VM { func NewVM(appState AppState, params Params, origin Word256) *VM {
return &VM{ return &VM{
appState: appState, appState: appState,
params: params, params: params,
@ -73,7 +83,7 @@ func (vm *VM) Call(caller, callee *Account, code, input []byte, value uint64, ga
// Just like Call() but does not transfer 'value' or modify the callDepth. // Just like Call() but does not transfer 'value' or modify the callDepth.
func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, gas *uint64) (output []byte, err error) { func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, gas *uint64) (output []byte, err error) {
fmt.Printf("(%d) (%X) %X (code=%d) gas: %v (d) %X\n", vm.callDepth, caller.Address[:4], callee.Address, len(callee.Code), *gas, input) dbg.Printf("(%d) (%X) %X (code=%d) gas: %v (d) %X\n", vm.callDepth, caller.Address[:4], callee.Address, len(callee.Code), *gas, input)
var ( var (
pc uint64 = 0 pc uint64 = 0
@ -89,7 +99,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
} }
var op = codeGetOp(code, pc) var op = codeGetOp(code, pc)
fmt.Printf("(pc) %-3d (op) %-14s (st) %-4d ", pc, op.String(), stack.Len()) dbg.Printf("(pc) %-3d (op) %-14s (st) %-4d ", pc, op.String(), stack.Len())
switch op { switch op {
@ -97,164 +107,197 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
return nil, nil return nil, nil
case ADD: // 0x01 case ADD: // 0x01
x, y := stack.Pop64(), stack.Pop64() //x, y := stack.Pop64(), stack.Pop64()
stack.Push64(x + y) //stack.Push64(x + y)
fmt.Printf(" %v + %v = %v\n", x, y, x+y) x, y := stack.Pop(), stack.Pop()
xb := new(big.Int).SetBytes(flip(x[:]))
yb := new(big.Int).SetBytes(flip(y[:]))
sum := new(big.Int).Add(xb, yb)
stack.Push(RightPadWord256(flip(sum.Bytes())))
dbg.Printf(" %v + %v = %v\n", xb, yb, sum)
case MUL: // 0x02 case MUL: // 0x02
x, y := stack.Pop64(), stack.Pop64() //x, y := stack.Pop64(), stack.Pop64()
stack.Push64(x * y) //stack.Push64(x * y)
fmt.Printf(" %v * %v = %v\n", x, y, x*y) x, y := stack.Pop(), stack.Pop()
xb := new(big.Int).SetBytes(flip(x[:]))
yb := new(big.Int).SetBytes(flip(y[:]))
prod := new(big.Int).Mul(xb, yb)
stack.Push(RightPadWord256(flip(prod.Bytes())))
dbg.Printf(" %v * %v = %v\n", xb, yb, prod)
case SUB: // 0x03 case SUB: // 0x03
x, y := stack.Pop64(), stack.Pop64() //x, y := stack.Pop64(), stack.Pop64()
stack.Push64(x - y) //stack.Push64(x - y)
fmt.Printf(" %v - %v = %v\n", x, y, x-y) x, y := stack.Pop(), stack.Pop()
xb := new(big.Int).SetBytes(flip(x[:]))
yb := new(big.Int).SetBytes(flip(y[:]))
diff := new(big.Int).Sub(xb, yb)
stack.Push(RightPadWord256(flip(diff.Bytes())))
dbg.Printf(" %v - %v = %v\n", xb, yb, diff)
case DIV: // 0x04 case DIV: // 0x04
x, y := stack.Pop64(), stack.Pop64() //x, y := stack.Pop64(), stack.Pop64()
if y == 0 { // TODO //stack.Push64(x / y)
stack.Push(Zero) x, y := stack.Pop(), stack.Pop()
fmt.Printf(" %v / %v = %v (TODO)\n", x, y, 0) if y.IsZero() { // TODO
stack.Push(Zero256)
dbg.Printf(" %x / %x = %v (TODO)\n", x, y, 0)
} else { } else {
stack.Push64(x / y) xb := new(big.Int).SetBytes(flip(x[:]))
fmt.Printf(" %v / %v = %v\n", x, y, x/y) yb := new(big.Int).SetBytes(flip(y[:]))
div := new(big.Int).Div(xb, yb)
stack.Push(RightPadWord256(flip(div.Bytes())))
dbg.Printf(" %v / %v = %v\n", xb, yb, div)
} }
case SDIV: // 0x05 case SDIV: // 0x05
// TODO ... big?
x, y := int64(stack.Pop64()), int64(stack.Pop64()) x, y := int64(stack.Pop64()), int64(stack.Pop64())
if y == 0 { // TODO if y == 0 { // TODO
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" %v / %v = %v (TODO)\n", x, y, 0) dbg.Printf(" %v / %v = %v (TODO)\n", x, y, 0)
} else { } else {
stack.Push64(uint64(x / y)) stack.Push64(uint64(x / y))
fmt.Printf(" %v / %v = %v\n", x, y, x/y) dbg.Printf(" %v / %v = %v\n", x, y, x/y)
} }
case MOD: // 0x06 case MOD: // 0x06
x, y := stack.Pop64(), stack.Pop64() //x, y := stack.Pop64(), stack.Pop64()
if y == 0 { // TODO x, y := stack.Pop(), stack.Pop()
stack.Push(Zero) if y.IsZero() { // TODO
fmt.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) stack.Push(Zero256)
dbg.Printf(" %v %% %v = %v (TODO)\n", x, y, 0)
} else { } else {
stack.Push64(x % y) xb := new(big.Int).SetBytes(flip(x[:]))
fmt.Printf(" %v %% %v = %v\n", x, y, x%y) yb := new(big.Int).SetBytes(flip(y[:]))
mod := new(big.Int).Mod(xb, yb)
stack.Push(RightPadWord256(flip(mod.Bytes())))
dbg.Printf(" %v %% %v = %v\n", xb, yb, mod)
} }
case SMOD: // 0x07 case SMOD: // 0x07
// TODO ... big?
x, y := int64(stack.Pop64()), int64(stack.Pop64()) x, y := int64(stack.Pop64()), int64(stack.Pop64())
if y == 0 { // TODO if y == 0 { // TODO
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" %v %% %v = %v (TODO)\n", x, y, 0) dbg.Printf(" %v %% %v = %v (TODO)\n", x, y, 0)
} else { } else {
stack.Push64(uint64(x % y)) stack.Push64(uint64(x % y))
fmt.Printf(" %v %% %v = %v\n", x, y, x%y) dbg.Printf(" %v %% %v = %v\n", x, y, x%y)
} }
case ADDMOD: // 0x08 case ADDMOD: // 0x08
// TODO ... big?
x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64() x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64()
if z == 0 { // TODO if z == 0 { // TODO
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) dbg.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0)
} else { } else {
stack.Push64(x % y) stack.Push64((x + y) % z)
fmt.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x+y)%z) dbg.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x+y)%z)
} }
case MULMOD: // 0x09 case MULMOD: // 0x09
// TODO ... big?
x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64() x, y, z := stack.Pop64(), stack.Pop64(), stack.Pop64()
if z == 0 { // TODO if z == 0 { // TODO
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0) dbg.Printf(" (%v + %v) %% %v = %v (TODO)\n", x, y, z, 0)
} else { } else {
stack.Push64(x % y) stack.Push64((x * y) % z)
fmt.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x*y)%z) dbg.Printf(" (%v + %v) %% %v = %v\n", x, y, z, (x*y)%z)
} }
case EXP: // 0x0A case EXP: // 0x0A
x, y := stack.Pop64(), stack.Pop64() //x, y := stack.Pop64(), stack.Pop64()
stack.Push64(ExpUint64(x, y)) //stack.Push64(ExpUint64(x, y))
fmt.Printf(" %v ** %v = %v\n", x, y, uint64(math.Pow(float64(x), float64(y)))) x, y := stack.Pop(), stack.Pop()
xb := new(big.Int).SetBytes(flip(x[:]))
yb := new(big.Int).SetBytes(flip(y[:]))
pow := new(big.Int).Exp(xb, yb, big.NewInt(0))
stack.Push(RightPadWord256(flip(pow.Bytes())))
dbg.Printf(" %v ** %v = %v\n", xb, yb, pow)
case SIGNEXTEND: // 0x0B case SIGNEXTEND: // 0x0B
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
res := (y << uint(x)) >> x res := (y << uint(x)) >> x
stack.Push64(res) stack.Push64(res)
fmt.Printf(" (%v << %v) >> %v = %v\n", y, x, x, res) dbg.Printf(" (%v << %v) >> %v = %v\n", y, x, x, res)
case LT: // 0x10 case LT: // 0x10
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
if x < y { if x < y {
stack.Push64(1) stack.Push64(1)
} else { } else {
stack.Push(Zero) stack.Push(Zero256)
} }
fmt.Printf(" %v < %v = %v\n", x, y, x < y) dbg.Printf(" %v < %v = %v\n", x, y, x < y)
case GT: // 0x11 case GT: // 0x11
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
if x > y { if x > y {
stack.Push64(1) stack.Push64(1)
} else { } else {
stack.Push(Zero) stack.Push(Zero256)
} }
fmt.Printf(" %v > %v = %v\n", x, y, x > y) dbg.Printf(" %v > %v = %v\n", x, y, x > y)
case SLT: // 0x12 case SLT: // 0x12
x, y := int64(stack.Pop64()), int64(stack.Pop64()) x, y := int64(stack.Pop64()), int64(stack.Pop64())
if x < y { if x < y {
stack.Push64(1) stack.Push64(1)
} else { } else {
stack.Push(Zero) stack.Push(Zero256)
} }
fmt.Printf(" %v < %v = %v\n", x, y, x < y) dbg.Printf(" %v < %v = %v\n", x, y, x < y)
case SGT: // 0x13 case SGT: // 0x13
x, y := int64(stack.Pop64()), int64(stack.Pop64()) x, y := int64(stack.Pop64()), int64(stack.Pop64())
if x > y { if x > y {
stack.Push64(1) stack.Push64(1)
} else { } else {
stack.Push(Zero) stack.Push(Zero256)
} }
fmt.Printf(" %v > %v = %v\n", x, y, x > y) dbg.Printf(" %v > %v = %v\n", x, y, x > y)
case EQ: // 0x14 case EQ: // 0x14
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
if x > y { if x == y {
stack.Push64(1) stack.Push64(1)
} else { } else {
stack.Push(Zero) stack.Push(Zero256)
} }
fmt.Printf(" %v == %v = %v\n", x, y, x == y) dbg.Printf(" %v == %v = %v\n", x, y, x == y)
case ISZERO: // 0x15 case ISZERO: // 0x15
x := stack.Pop64() x := stack.Pop64()
if x == 0 { if x == 0 {
stack.Push64(1) stack.Push64(1)
} else { } else {
stack.Push(Zero) stack.Push(Zero256)
} }
fmt.Printf(" %v == 0 = %v\n", x, x == 0) dbg.Printf(" %v == 0 = %v\n", x, x == 0)
case AND: // 0x16 case AND: // 0x16
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
stack.Push64(x & y) stack.Push64(x & y)
fmt.Printf(" %v & %v = %v\n", x, y, x&y) dbg.Printf(" %v & %v = %v\n", x, y, x&y)
case OR: // 0x17 case OR: // 0x17
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
stack.Push64(x | y) stack.Push64(x | y)
fmt.Printf(" %v | %v = %v\n", x, y, x|y) dbg.Printf(" %v | %v = %v\n", x, y, x|y)
case XOR: // 0x18 case XOR: // 0x18
x, y := stack.Pop64(), stack.Pop64() x, y := stack.Pop64(), stack.Pop64()
stack.Push64(x ^ y) stack.Push64(x ^ y)
fmt.Printf(" %v ^ %v = %v\n", x, y, x^y) dbg.Printf(" %v ^ %v = %v\n", x, y, x^y)
case NOT: // 0x19 case NOT: // 0x19
x := stack.Pop64() x := stack.Pop64()
stack.Push64(^x) stack.Push64(^x)
fmt.Printf(" !%v = %v\n", x, ^x) dbg.Printf(" !%v = %v\n", x, ^x)
case BYTE: // 0x1A case BYTE: // 0x1A
idx, val := stack.Pop64(), stack.Pop() idx, val := stack.Pop64(), stack.Pop()
@ -263,7 +306,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
res = val[idx] res = val[idx]
} }
stack.Push64(uint64(res)) stack.Push64(uint64(res))
fmt.Printf(" => 0x%X\n", res) dbg.Printf(" => 0x%X\n", res)
case SHA3: // 0x20 case SHA3: // 0x20
if ok = useGas(gas, GasSha3); !ok { if ok = useGas(gas, GasSha3); !ok {
@ -276,36 +319,36 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
} }
data = sha3.Sha3(data) data = sha3.Sha3(data)
stack.PushBytes(data) stack.PushBytes(data)
fmt.Printf(" => (%v) %X\n", size, data) dbg.Printf(" => (%v) %X\n", size, data)
case ADDRESS: // 0x30 case ADDRESS: // 0x30
stack.Push(callee.Address) stack.Push(callee.Address)
fmt.Printf(" => %X\n", callee.Address) dbg.Printf(" => %X\n", callee.Address)
case BALANCE: // 0x31 case BALANCE: // 0x31
addr := stack.Pop() addr := stack.Pop()
if ok = useGas(gas, GasGetAccount); !ok { if ok = useGas(gas, GasGetAccount); !ok {
return nil, firstErr(err, ErrInsufficientGas) return nil, firstErr(err, ErrInsufficientGas)
} }
account, err_ := vm.appState.GetAccount(addr) // TODO ensure that 20byte lengths are supported. acc := vm.appState.GetAccount(addr) // TODO ensure that 20byte lengths are supported.
if err_ != nil { if acc == nil {
return nil, firstErr(err, err_) return nil, firstErr(err, ErrUnknownAddress)
} }
balance := account.Balance balance := acc.Balance
stack.Push64(balance) stack.Push64(balance)
fmt.Printf(" => %v (%X)\n", balance, addr) dbg.Printf(" => %v (%X)\n", balance, addr)
case ORIGIN: // 0x32 case ORIGIN: // 0x32
stack.Push(vm.origin) stack.Push(vm.origin)
fmt.Printf(" => %X\n", vm.origin) dbg.Printf(" => %X\n", vm.origin)
case CALLER: // 0x33 case CALLER: // 0x33
stack.Push(caller.Address) stack.Push(caller.Address)
fmt.Printf(" => %X\n", caller.Address) dbg.Printf(" => %X\n", caller.Address)
case CALLVALUE: // 0x34 case CALLVALUE: // 0x34
stack.Push64(value) stack.Push64(value)
fmt.Printf(" => %v\n", value) dbg.Printf(" => %v\n", value)
case CALLDATALOAD: // 0x35 case CALLDATALOAD: // 0x35
offset := stack.Pop64() offset := stack.Pop64()
@ -313,12 +356,12 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
if !ok { if !ok {
return nil, firstErr(err, ErrInputOutOfBounds) return nil, firstErr(err, ErrInputOutOfBounds)
} }
stack.Push(RightPadWord(data)) stack.Push(RightPadWord256(data))
fmt.Printf(" => 0x%X\n", data) dbg.Printf(" => 0x%X\n", data)
case CALLDATASIZE: // 0x36 case CALLDATASIZE: // 0x36
stack.Push64(uint64(len(input))) stack.Push64(uint64(len(input)))
fmt.Printf(" => %d\n", len(input)) dbg.Printf(" => %d\n", len(input))
case CALLDATACOPY: // 0x37 case CALLDATACOPY: // 0x37
memOff := stack.Pop64() memOff := stack.Pop64()
@ -333,18 +376,17 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
copy(dest, data) copy(dest, data)
fmt.Printf(" => [%v, %v, %v] %X\n", memOff, inputOff, length, data) dbg.Printf(" => [%v, %v, %v] %X\n", memOff, inputOff, length, data)
case CODESIZE: // 0x38 case CODESIZE: // 0x38
l := uint64(len(code)) l := uint64(len(code))
stack.Push64(l) stack.Push64(l)
fmt.Printf(" => %d\n", l) dbg.Printf(" => %d\n", l)
case CODECOPY: // 0x39 case CODECOPY: // 0x39
memOff := stack.Pop64() memOff := stack.Pop64()
codeOff := stack.Pop64() codeOff := stack.Pop64()
length := stack.Pop64() length := stack.Pop64()
fmt.Println("CODECOPY: codeOff, length, codelength", codeOff, length, len(code))
data, ok := subslice(code, codeOff, length, false) data, ok := subslice(code, codeOff, length, false)
if !ok { if !ok {
return nil, firstErr(err, ErrCodeOutOfBounds) return nil, firstErr(err, ErrCodeOutOfBounds)
@ -354,36 +396,36 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
copy(dest, data) copy(dest, data)
fmt.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) dbg.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data)
case GASPRICE_DEPRECATED: // 0x3A case GASPRICE_DEPRECATED: // 0x3A
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" => %X (GASPRICE IS DEPRECATED)\n") dbg.Printf(" => %X (GASPRICE IS DEPRECATED)\n")
case EXTCODESIZE: // 0x3B case EXTCODESIZE: // 0x3B
addr := stack.Pop() addr := stack.Pop()
if ok = useGas(gas, GasGetAccount); !ok { if ok = useGas(gas, GasGetAccount); !ok {
return nil, firstErr(err, ErrInsufficientGas) return nil, firstErr(err, ErrInsufficientGas)
} }
account, err_ := vm.appState.GetAccount(addr) acc := vm.appState.GetAccount(addr)
if err_ != nil { if acc == nil {
return nil, firstErr(err, err_) return nil, firstErr(err, ErrUnknownAddress)
} }
code := account.Code code := acc.Code
l := uint64(len(code)) l := uint64(len(code))
stack.Push64(l) stack.Push64(l)
fmt.Printf(" => %d\n", l) dbg.Printf(" => %d\n", l)
case EXTCODECOPY: // 0x3C case EXTCODECOPY: // 0x3C
addr := stack.Pop() addr := stack.Pop()
if ok = useGas(gas, GasGetAccount); !ok { if ok = useGas(gas, GasGetAccount); !ok {
return nil, firstErr(err, ErrInsufficientGas) return nil, firstErr(err, ErrInsufficientGas)
} }
account, err_ := vm.appState.GetAccount(addr) acc := vm.appState.GetAccount(addr)
if err_ != nil { if acc == nil {
return nil, firstErr(err, err_) return nil, firstErr(err, ErrUnknownAddress)
} }
code := account.Code code := acc.Code
memOff := stack.Pop64() memOff := stack.Pop64()
codeOff := stack.Pop64() codeOff := stack.Pop64()
length := stack.Pop64() length := stack.Pop64()
@ -396,33 +438,33 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
copy(dest, data) copy(dest, data)
fmt.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data) dbg.Printf(" => [%v, %v, %v] %X\n", memOff, codeOff, length, data)
case BLOCKHASH: // 0x40 case BLOCKHASH: // 0x40
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) dbg.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes())
case COINBASE: // 0x41 case COINBASE: // 0x41
stack.Push(Zero) stack.Push(Zero256)
fmt.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes()) dbg.Printf(" => 0x%X (NOT SUPPORTED)\n", stack.Peek().Bytes())
case TIMESTAMP: // 0x42 case TIMESTAMP: // 0x42
time := vm.params.BlockTime time := vm.params.BlockTime
stack.Push64(uint64(time)) stack.Push64(uint64(time))
fmt.Printf(" => 0x%X\n", time) dbg.Printf(" => 0x%X\n", time)
case BLOCKHEIGHT: // 0x43 case BLOCKHEIGHT: // 0x43
number := uint64(vm.params.BlockHeight) number := uint64(vm.params.BlockHeight)
stack.Push64(number) stack.Push64(number)
fmt.Printf(" => 0x%X\n", number) dbg.Printf(" => 0x%X\n", number)
case GASLIMIT: // 0x45 case GASLIMIT: // 0x45
stack.Push64(vm.params.GasLimit) stack.Push64(vm.params.GasLimit)
fmt.Printf(" => %v\n", vm.params.GasLimit) dbg.Printf(" => %v\n", vm.params.GasLimit)
case POP: // 0x50 case POP: // 0x50
stack.Pop() stack.Pop()
fmt.Printf(" => %v\n", vm.params.GasLimit) dbg.Printf(" => %v\n", vm.params.GasLimit)
case MLOAD: // 0x51 case MLOAD: // 0x51
offset := stack.Pop64() offset := stack.Pop64()
@ -430,17 +472,17 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
if !ok { if !ok {
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
stack.Push(RightPadWord(data)) stack.Push(RightPadWord256(data))
fmt.Printf(" => 0x%X\n", data) dbg.Printf(" => 0x%X\n", data)
case MSTORE: // 0x52 case MSTORE: // 0x52
offset, data := stack.Pop64(), stack.Pop() offset, data := stack.Pop64(), stack.Pop()
dest, ok := subslice(memory, offset, 32, true) dest, ok := subslice(memory, offset, 32, false)
if !ok { if !ok {
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
copy(dest, data[:]) copy(dest, flip(data[:]))
fmt.Printf(" => 0x%X\n", data) dbg.Printf(" => 0x%X\n", data)
case MSTORE8: // 0x53 case MSTORE8: // 0x53
offset, val := stack.Pop64(), byte(stack.Pop64()&0xFF) offset, val := stack.Pop64(), byte(stack.Pop64()&0xFF)
@ -448,26 +490,21 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
memory[offset] = val memory[offset] = val
fmt.Printf(" => [%v] 0x%X\n", offset, val) dbg.Printf(" => [%v] 0x%X\n", offset, val)
case SLOAD: // 0x54 case SLOAD: // 0x54
loc := stack.Pop() loc := stack.Pop()
data, _ := vm.appState.GetStorage(callee.Address, loc) data := vm.appState.GetStorage(callee.Address, loc)
stack.Push(data) stack.Push(flipWord(data))
fmt.Printf(" {0x%X : 0x%X}\n", loc, data) dbg.Printf(" {0x%X : 0x%X}\n", loc, data)
case SSTORE: // 0x55 case SSTORE: // 0x55
loc, data := stack.Pop(), stack.Pop() loc, data := stack.Pop(), stack.Pop()
updated, err_ := vm.appState.SetStorage(callee.Address, loc, data) loc = flipWord(loc)
if err = firstErr(err, err_); err != nil { data = flipWord(data)
return nil, err vm.appState.SetStorage(callee.Address, loc, data)
} useGas(gas, GasStorageUpdate)
if updated { dbg.Printf(" {0x%X : 0x%X}\n", loc, data)
useGas(gas, GasStorageUpdate)
} else {
useGas(gas, GasStorageCreate)
}
fmt.Printf(" {0x%X : 0x%X}\n", loc, data)
case JUMP: // 0x56 case JUMP: // 0x56
err = jump(code, stack.Pop64(), &pc) err = jump(code, stack.Pop64(), &pc)
@ -479,7 +516,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
err = jump(code, pos, &pc) err = jump(code, pos, &pc)
continue continue
} }
fmt.Printf(" ~> false\n") dbg.Printf(" ~> false\n")
case PC: // 0x58 case PC: // 0x58
stack.Push64(pc) stack.Push64(pc)
@ -489,10 +526,10 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
case GAS: // 0x5A case GAS: // 0x5A
stack.Push64(*gas) stack.Push64(*gas)
fmt.Printf(" => %X\n", *gas) dbg.Printf(" => %X\n", *gas)
case JUMPDEST: // 0x5B case JUMPDEST: // 0x5B
fmt.Printf("\n") dbg.Printf("\n")
// Do nothing // Do nothing
case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32: case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
@ -501,24 +538,24 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
if !ok { if !ok {
return nil, firstErr(err, ErrCodeOutOfBounds) return nil, firstErr(err, ErrCodeOutOfBounds)
} }
res := RightPadWord(codeSegment) res := RightPadWord256(codeSegment)
stack.Push(res) stack.Push(res)
pc += a pc += a
fmt.Printf(" => 0x%X\n", res) dbg.Printf(" => 0x%X\n", res)
case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16: case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16:
n := int(op - DUP1 + 1) n := int(op - DUP1 + 1)
stack.Dup(n) stack.Dup(n)
fmt.Printf(" => [%d] 0x%X\n", n, stack.Peek().Bytes()) dbg.Printf(" => [%d] 0x%X\n", n, stack.Peek().Bytes())
case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16: case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16:
n := int(op - SWAP1 + 2) n := int(op - SWAP1 + 2)
stack.Swap(n) stack.Swap(n)
fmt.Printf(" => [%d]\n", n) dbg.Printf(" => [%d]\n", n)
case LOG0, LOG1, LOG2, LOG3, LOG4: case LOG0, LOG1, LOG2, LOG3, LOG4:
n := int(op - LOG0) n := int(op - LOG0)
topics := make([]Word, n) topics := make([]Word256, n)
offset, size := stack.Pop64(), stack.Pop64() offset, size := stack.Pop64(), stack.Pop64()
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
topics[i] = stack.Pop() topics[i] = stack.Pop()
@ -534,7 +571,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
vm.params.BlockHeight, vm.params.BlockHeight,
} }
vm.appState.AddLog(log) vm.appState.AddLog(log)
fmt.Printf(" => %v\n", log) dbg.Printf(" => %v\n", log)
case CREATE: // 0xF0 case CREATE: // 0xF0
contractValue := stack.Pop64() contractValue := stack.Pop64()
@ -551,19 +588,14 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
// TODO charge for gas to create account _ the code length * GasCreateByte // TODO charge for gas to create account _ the code length * GasCreateByte
newAccount, err := vm.appState.CreateAccount(callee) newAccount := vm.appState.CreateAccount(callee)
if err != nil { // Run the input to get the contract code.
stack.Push(Zero) ret, err_ := vm.Call(callee, newAccount, input, input, contractValue, gas)
fmt.Printf(" (*) 0x0 %v\n", err) if err_ != nil {
stack.Push(Zero256)
} else { } else {
// Run the input to get the contract code. newAccount.Code = ret // Set the code
ret, err_ := vm.Call(callee, newAccount, input, input, contractValue, gas) stack.Push(newAccount.Address)
if err_ != nil {
stack.Push(Zero)
} else {
newAccount.Code = ret // Set the code
stack.Push(newAccount.Address)
}
} }
case CALL, CALLCODE: // 0xF1, 0xF2 case CALL, CALLCODE: // 0xF1, 0xF2
@ -571,7 +603,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
addr, value := stack.Pop(), stack.Pop64() addr, value := stack.Pop(), stack.Pop64()
inOffset, inSize := stack.Pop64(), stack.Pop64() // inputs inOffset, inSize := stack.Pop64(), stack.Pop64() // inputs
retOffset, retSize := stack.Pop64(), stack.Pop64() // outputs retOffset, retSize := stack.Pop64(), stack.Pop64() // outputs
fmt.Printf(" => %X\n", addr) dbg.Printf(" => %X\n", addr)
// Get the arguments from the memory // Get the arguments from the memory
args, ok := subslice(memory, inOffset, inSize, false) args, ok := subslice(memory, inOffset, inSize, false)
@ -598,22 +630,22 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
if ok = useGas(gas, GasGetAccount); !ok { if ok = useGas(gas, GasGetAccount); !ok {
return nil, firstErr(err, ErrInsufficientGas) return nil, firstErr(err, ErrInsufficientGas)
} }
account, err_ := vm.appState.GetAccount(addr) acc := vm.appState.GetAccount(addr)
if err = firstErr(err, err_); err != nil { if acc == nil {
return nil, err return nil, firstErr(err, ErrUnknownAddress)
} }
if op == CALLCODE { if op == CALLCODE {
ret, err = vm.Call(callee, callee, account.Code, args, value, gas) ret, err = vm.Call(callee, callee, acc.Code, args, value, gas)
} else { } else {
ret, err = vm.Call(callee, account, account.Code, args, value, gas) ret, err = vm.Call(callee, acc, acc.Code, args, value, gas)
} }
} }
// Push result // Push result
if err != nil { if err != nil {
stack.Push(Zero) stack.Push(Zero256)
} else { } else {
stack.Push(One) stack.Push(One256)
dest, ok := subslice(memory, retOffset, retSize, false) dest, ok := subslice(memory, retOffset, retSize, false)
if !ok { if !ok {
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
@ -624,7 +656,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
// Handle remaining gas. // Handle remaining gas.
*gas += gasLimit *gas += gasLimit
fmt.Printf("resume %X (%v)\n", callee.Address, gas) dbg.Printf("resume %X (%v)\n", callee.Address, gas)
case RETURN: // 0xF3 case RETURN: // 0xF3
offset, size := stack.Pop64(), stack.Pop64() offset, size := stack.Pop64(), stack.Pop64()
@ -632,7 +664,7 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
if !ok { if !ok {
return nil, firstErr(err, ErrMemoryOutOfBounds) return nil, firstErr(err, ErrMemoryOutOfBounds)
} }
fmt.Printf(" => [%v, %v] (%d) 0x%X\n", offset, size, len(ret), ret) dbg.Printf(" => [%v, %v] (%d) 0x%X\n", offset, size, len(ret), ret)
return ret, nil return ret, nil
case SUICIDE: // 0xFF case SUICIDE: // 0xFF
@ -640,20 +672,20 @@ func (vm *VM) call(caller, callee *Account, code, input []byte, value uint64, ga
if ok = useGas(gas, GasGetAccount); !ok { if ok = useGas(gas, GasGetAccount); !ok {
return nil, firstErr(err, ErrInsufficientGas) return nil, firstErr(err, ErrInsufficientGas)
} }
// TODO if the receiver is Zero, then make it the fee. // TODO if the receiver is Zero256, then make it the fee.
receiver, err_ := vm.appState.GetAccount(addr) receiver := vm.appState.GetAccount(addr)
if err = firstErr(err, err_); err != nil { if receiver == nil {
return nil, err return nil, firstErr(err, ErrUnknownAddress)
} }
balance := callee.Balance balance := callee.Balance
receiver.Balance += balance receiver.Balance += balance
vm.appState.UpdateAccount(receiver) vm.appState.UpdateAccount(receiver)
vm.appState.DeleteAccount(callee) vm.appState.RemoveAccount(callee)
fmt.Printf(" => (%X) %v\n", addr[:4], balance) dbg.Printf(" => (%X) %v\n", addr[:4], balance)
fallthrough fallthrough
default: default:
fmt.Printf("(pc) %-3v Invalid opcode %X\n", pc, op) dbg.Printf("(pc) %-3v Invalid opcode %X\n", pc, op)
panic(fmt.Errorf("Invalid opcode %X", op)) panic(fmt.Errorf("Invalid opcode %X", op))
} }
@ -688,10 +720,10 @@ func codeGetOp(code []byte, n uint64) OpCode {
func jump(code []byte, to uint64, pc *uint64) (err error) { func jump(code []byte, to uint64, pc *uint64) (err error) {
dest := codeGetOp(code, to) dest := codeGetOp(code, to)
if dest != JUMPDEST { if dest != JUMPDEST {
fmt.Printf(" ~> %v invalid jump dest %v\n", to, dest) dbg.Printf(" ~> %v invalid jump dest %v\n", to, dest)
return ErrInvalidJumpDest return ErrInvalidJumpDest
} }
fmt.Printf(" ~> %v\n", to) dbg.Printf(" ~> %v\n", to)
*pc = to *pc = to
return nil return nil
} }
@ -724,10 +756,25 @@ func transfer(from, to *Account, amount uint64) error {
} }
func flip(in []byte) []byte { func flip(in []byte) []byte {
l2 := len(in) / 2
flipped := make([]byte, len(in)) flipped := make([]byte, len(in))
for i := 0; i < len(flipped)/2; i++ { // copy the middle bit (if its even it will get overwritten)
if len(in) != 0 {
flipped[l2] = in[l2]
}
for i := 0; i < l2; i++ {
flipped[i] = in[len(in)-1-i] flipped[i] = in[len(in)-1-i]
flipped[len(in)-1-i] = in[i] flipped[len(in)-1-i] = in[i]
} }
return flipped return flipped
} }
func flipWord(in Word256) Word256 {
word := Word256{}
// copy the middle bit (if its even it will get overwritten)
for i := 0; i < 16; i++ {
word[i] = in[len(in)-1-i]
word[len(in)-1-i] = in[i]
}
return word
}
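A note on the byte-order handling above: the arithmetic cases treat a word on the stack as least-significant-byte first, so they flip it into big-endian before handing it to math/big (which expects big-endian bytes), flip the result back, and right-pad it to 32 bytes. A standalone sketch of that round trip, with a local flip helper and a rightPad32 stand-in for RightPadWord256:

package main

import (
	"fmt"
	"math/big"
)

// flip reverses a byte slice, as vm.go does around its math/big calls.
func flip(in []byte) []byte {
	flipped := make([]byte, len(in))
	for i := range in {
		flipped[i] = in[len(in)-1-i]
	}
	return flipped
}

// rightPad32 pads b on the right to 32 bytes, like RightPadWord256.
func rightPad32(b []byte) [32]byte {
	var w [32]byte
	copy(w[:], b)
	return w
}

func main() {
	// 5 and 7 as least-significant-byte-first 256-bit words, the stack layout.
	x := rightPad32([]byte{0x05})
	y := rightPad32([]byte{0x07})

	// Mirror the ADD case: flip to big-endian, add, flip back, right-pad.
	xb := new(big.Int).SetBytes(flip(x[:]))
	yb := new(big.Int).SetBytes(flip(y[:]))
	sum := rightPad32(flip(new(big.Int).Add(xb, yb).Bytes()))

	fmt.Printf("%v + %v = %d (word: %x...)\n",
		xb, yb, new(big.Int).SetBytes(flip(sum[:])), sum[:4])
	// Prints: 5 + 7 = 12 (word: 0c000000...)
}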