Pauan bdcf27c7cb Major improvements to wasm-bindgen-futures (#1760)
This PR contains a few major improvements:

* Code duplication has been removed.

* Everything has been refactored so that the implementation is much easier to understand.

* `future_to_promise` is now implemented with `spawn_local` rather than the other way around (this means `spawn_local` is faster since it doesn't need to create an unneeded `Promise`).

* Both the single threaded and multi threaded executors have been rewritten from scratch:

   * They only create 1-2 allocations in Rust per Task, and all of the allocations happen when the Task is created.

   * The single threaded executor creates 1 Promise per tick, rather than 1 Promise per tick per Task.

   * Both executors do *not* create `Closure`s during polling, instead all needed `Closure`s are created ahead of time.

   * Both executors now have correct behavior with regard to spurious wakeups and waking up during the call to `poll`.

   * Both executors cache the `Waker` so it doesn't need to be recreated all the time.

I believe both executors are now optimal in terms of both Rust and JS performance.
2019-09-26 13:33:12 -05:00

90 lines
2.7 KiB
Rust

use js_sys::Promise;
use std::cell::{Cell, RefCell};
use std::collections::VecDeque;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
/// Shared, single-threaded executor state: the pending tasks plus a flag
/// tracking whether a drain of the queue is currently running or scheduled.
struct QueueState {
    // The queue of Tasks which will be run in order. In practice this is all the
    // synchronous work of futures, and each `Task` represents calling `poll` on
    // a future "at the right time"
    tasks: RefCell<VecDeque<Rc<crate::task::Task>>>,

    // This flag indicates whether we're currently executing inside of
    // `run_all` or have scheduled `run_all` to run in the future. This is
    // used to ensure that it's only scheduled once.
    is_spinning: Cell<bool>,
}
impl QueueState {
    /// Drains the queue, running every pending `Task` in FIFO order.
    ///
    /// Note that this greedy execution blocks the event loop if a future is
    /// stuck in an infinite self-waking loop; yielding back to the main event
    /// loop occasionally could be a future improvement.
    fn run_all(&self) {
        // We should only ever drain while a drain is marked as in progress.
        debug_assert!(self.is_spinning.get());

        loop {
            // Pop under the `RefCell` borrow, but release that borrow
            // *before* running the task, since running a task may re-enter
            // and push more work onto `self.tasks`. Binding to a local first
            // ensures the temporary `RefMut` is dropped at the end of this
            // statement.
            let next = self.tasks.borrow_mut().pop_front();
            match next {
                Some(task) => task.run(),
                None => break,
            }
        }

        // The queue is fully drained, so it's now possible for the next
        // `push_task` to schedule another tick.
        self.is_spinning.set(false);
    }
}
/// The per-thread task queue: tasks are pushed here and then drained on the
/// next microtask tick.
pub(crate) struct Queue {
    // Shared state (pending tasks + scheduling flag); also captured by
    // `closure` below.
    state: Rc<QueueState>,
    // An already-resolved promise; calling `.then` on it schedules `closure`
    // to run on the next microtask tick.
    promise: Promise,
    // Runs `QueueState::run_all`; created once up front so that scheduling a
    // tick never allocates a new `Closure`.
    closure: Closure<dyn FnMut(JsValue)>,
}
impl Queue {
    /// Enqueues `task` to be polled, scheduling a queue drain on the next
    /// microtask tick if one isn't already running or scheduled.
    pub(crate) fn push_task(&self, task: Rc<crate::task::Task>) {
        self.state.tasks.borrow_mut().push_back(task);

        // If `run_all` is already executing (or already scheduled) it will
        // pick up the task we just enqueued, so there's nothing left to do.
        // Otherwise flip the flag and schedule a microtask ourselves.
        //
        // Note that we currently use a promise and a closure to do this, but
        // eventually we should probably use something like `queueMicrotask`:
        // https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/queueMicrotask
        let previously_scheduled = self.state.is_spinning.replace(true);
        if !previously_scheduled {
            self.promise.then(&self.closure);
        }
    }
}
impl Queue {
fn new() -> Self {
let state = Rc::new(QueueState {
is_spinning: Cell::new(false),
tasks: RefCell::new(VecDeque::new()),
});
Self {
promise: Promise::resolve(&JsValue::undefined()),
closure: {
let state = Rc::clone(&state);
// This closure will only be called on the next microtask event
// tick
Closure::wrap(Box::new(move |_| state.run_all()))
},
state,
}
}
}
thread_local! {
    // The per-thread task queue, initialized lazily on first access. It is
    // thread-local because `Queue` holds JS values (`Promise`, `Closure`)
    // which are tied to the current thread's JS context.
    pub(crate) static QUEUE: Queue = Queue::new();
}