package v2

import (
	"fmt"
	"time"
)

/*
# Message passing between components

* The output of one routine becomes the input for all other routines.
* Avoid loops somehow.
* Message priority.

# Components have isolated lifecycle management

* Lifecycle management
  * Setup
  * Teardown

# Individual

* Message passing should be non-blocking.
* Backpressure between components.
* Lifecycle management of components.
* Observable behavior:
  * progress
  * blocking components

What would a test look like? (a rough sketch follows this comment)

Lifecycle management

Start/Stop

How to make this non-blocking?

How to avoid thread saturation?

How to handle concurrency?
*/
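
// The comment above asks what a test of this design might look like. This is
// only a minimal sketch, not a real test (no assertions, not part of the
// original design): it assumes the plain event structs below satisfy the
// package-level Event type, as the timeCheck usage in Start suggests, and it
// simply exercises the Start -> Receive -> Stop lifecycle.
func exampleLifecycleSketch() {
	reactor := &DummyReactor{}
	reactor.Start()

	// Feed an event through the demuxer, as a peer would.
	reactor.Receive(testEvent{msg: "hello", time: time.Now()})

	// Stop tears the components down: demuxer first, then scheduler and processor.
	reactor.Stop()
}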

type testEvent struct {
	msg  string
	time time.Time
}

type testEventTwo struct {
	msg string
}

type stopEvent struct{}

type timeCheck struct {
	time time.Time
}

// scheduler

type scheduler struct {
	input   chan Event
	output  chan Event
	stopped chan struct{}
}

func newScheduler(output chan Event) *scheduler {
	input := make(chan Event, 1)
	return &scheduler{
		input:  input,
		output: output,
		// stopped must be initialized, otherwise stop() would block forever
		// waiting on a nil channel.
		stopped: make(chan struct{}),
	}
}

func (sc *scheduler) run() {
	fmt.Println("scheduler run")
	for {
		iEvent, ok := <-sc.input
		if !ok {
			fmt.Println("stopping scheduler")
			sc.stopped <- struct{}{}
			break
		}
		oEvents := sc.handle(iEvent)
		for _, event := range oEvents {
			sc.output <- event
		}
	}
}

func (sc *scheduler) send(event Event) bool {
	fmt.Println("scheduler send")
	select {
	case sc.input <- event:
		return true
	default:
		fmt.Println("scheduler channel was full")
		return false
	}
}

func (sc *scheduler) handle(event Event) Events {
	switch event.(type) {
	case timeCheck:
		fmt.Println("scheduler handle timeCheck")
	case testEvent:
		fmt.Println("scheduler handle testEvent")
	}
	return Events{}
}

func (sc *scheduler) stop() {
	fmt.Println("scheduler stop")
	close(sc.input)
	<-sc.stopped
}

// processor

type processor struct {
	input   chan Event
	output  chan Event
	stopped chan struct{}
}

func newProcessor(output chan Event) *processor {
	input := make(chan Event, 1)
	return &processor{
		input:   input,
		output:  output,
		stopped: make(chan struct{}),
	}
}

func (pc *processor) run() {
	fmt.Println("processor run")
	for {
		iEvent, ok := <-pc.input
		if !ok {
			fmt.Println("stopping processor")
			pc.stopped <- struct{}{}
			break
		}
		oEvents := pc.handle(iEvent)
		for _, event := range oEvents {
			pc.output <- event
		}
	}
}

func (pc *processor) send(event Event) bool {
	fmt.Println("processor send")
	select {
	case pc.input <- event:
		return true
	default:
		fmt.Println("processor channel was full")
		return false
	}
}

func (pc *processor) handle(event Event) Events {
	switch event.(type) {
	case timeCheck:
		fmt.Println("processor handle timeCheck")
	case testEvent:
		fmt.Println("processor handle testEvent")
	}
	return Events{}
}

func (pc *processor) stop() {
	fmt.Println("processor stop")
	close(pc.input)
	<-pc.stopped
}

// demuxer

type demuxer struct {
	input     chan Event
	output    chan Event
	scheduler *scheduler
	processor *processor
	stopped   chan struct{}
}

func newDemuxer(output chan Event, scheduler *scheduler, processor *processor) *demuxer {
	input := make(chan Event, 1)
	return &demuxer{
		input:     input,
		output:    output,
		scheduler: scheduler,
		processor: processor,
		stopped:   make(chan struct{}),
	}
}

func (dm *demuxer) run() {
	fmt.Println("Running demuxer")
	for {
		// so now we need a way to flush
		event, ok := <-dm.input
		if !ok {
			fmt.Println("demuxer stopping")
			dm.stopped <- struct{}{}
			break
		}
		// event.time = time.Now()
		received := dm.scheduler.send(event)
		if !received {
			panic("couldn't send to scheduler")
		}

		received = dm.processor.send(event)
		if !received {
			panic("couldn't send to the processor")
		}
	}
}

func (dm *demuxer) send(event Event) bool {
	fmt.Println("demuxer send")
	// we need to close if this is closed first
	select {
	case dm.input <- event:
		return true
	default:
		fmt.Println("demuxer channel was full")
		return false
	}
}

func (dm *demuxer) stop() {
	fmt.Println("demuxer stop")
	close(dm.input)
	<-dm.stopped
	fmt.Println("demuxer stopped")
}

// reactor

type DummyReactor struct {
	events        chan Event
	demuxer       *demuxer
	scheduler     *scheduler
	processor     *processor
	ticker        *time.Ticker
	tickerStopped chan struct{}
}

func (dr *DummyReactor) Start() {
	bufferSize := 10
	events := make(chan Event, bufferSize)

	dr.scheduler = newScheduler(events)
	dr.processor = newProcessor(events)
	dr.demuxer = newDemuxer(events, dr.scheduler, dr.processor)
	dr.tickerStopped = make(chan struct{})

	go dr.scheduler.run()
	go dr.processor.run()
	go dr.demuxer.run()

	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop() // release the ticker when this goroutine exits
		for {
			select {
			case <-ticker.C:
				dr.demuxer.send(timeCheck{})
			case <-dr.tickerStopped:
				fmt.Println("ticker stopped")
				return
			}
		}
	}()
}

// XXX: We need to have a smooth shutdown process
func (dr *DummyReactor) Stop() {
	fmt.Println("reactor stopping")
	// this should be synchronous
	dr.tickerStopped <- struct{}{}
	fmt.Println("waiting for ticker")
	// the order here matters
	dr.demuxer.stop() // this needs to drain first
	dr.scheduler.stop()
	dr.processor.stop()

	fmt.Println("reactor stopped")
}

func (dr *DummyReactor) Receive(event Event) {
	fmt.Println("receive event")
	sent := dr.demuxer.send(event)
	if !sent {
		panic("demuxer is full")
	}
}

func (dr *DummyReactor) AddPeer() {
	// TODO: add a peer event and send it to the demuxer (see the sketch below)
}
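
// A rough sketch of what the AddPeer TODO above might turn into: a dedicated
// peer event pushed through the demuxer like any other input. The
// addPeerEvent type and its peerID field are illustrative assumptions, not
// part of the design yet.
type addPeerEvent struct {
	peerID string
}

func (dr *DummyReactor) addPeerSketch(peerID string) {
	if sent := dr.demuxer.send(addPeerEvent{peerID: peerID}); !sent {
		panic("demuxer is full")
	}
}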