//! Internal-only runtime module used for the `wasm_bindgen_test` crate.
//!
//! No API contained in this module will respect semver, these should all be
//! considered private APIs.

// # Architecture of `wasm_bindgen_test`
//
// This module can seem a bit funky, but it's intended to be the runtime
// support of the `#[wasm_bindgen_test]` macro and be amenable to executing
// wasm test suites. The general idea is that for a wasm test binary there will
// be a set of functions tagged `#[wasm_bindgen_test]`. It's the job of the
// runtime support to execute all of these functions, collecting and collating
// the results.
//
// This runtime support works in tandem with the `wasm-bindgen-test-runner`
// binary as part of the `wasm-bindgen-cli` package.
//
// ## High Level Overview
//
// Here's a rough and (semi) high level overview of what happens when this
// crate runs.
//
// * First, the user runs `cargo test --target wasm32-unknown-unknown`
//
// * Cargo then compiles all the test suites (aka `tests/*.rs`) as wasm
//   binaries (the `bin` crate type). These binaries all have `main` entry
//   points, but they're never actually used. The binaries are also compiled
//   with `--test`, which means they're linked to the standard `test` crate,
//   but this crate doesn't work on wasm and so we bypass it entirely.
//
// * Instead of using `#[test]`, which doesn't work, users write tests with
//   `#[wasm_bindgen_test]`. This macro expands to a bunch of `#[no_mangle]`
//   functions with known names (currently named `__wbg_test_*`). A rough
//   sketch of such an entry point can be found at the end of this file.
//
// * Next up, Cargo was configured via its test runner support to execute the
//   `wasm-bindgen-test-runner` binary. Instead of what Cargo normally does,
//   executing `target/wasm32-unknown-unknown/debug/deps/foo-xxxxx.wasm` (which
//   would fail as we can't actually execute wasm binaries), Cargo will execute
//   `wasm-bindgen-test-runner target/.../foo-xxxxx.wasm`.
//
// * The `wasm-bindgen-test-runner` binary takes over. It runs `wasm-bindgen`
//   over the binary, generating JS bindings and such. It also figures out if
//   we're running in node.js or a browser.
//
// * The `wasm-bindgen-test-runner` binary generates a JS entry point. This
//   entry point creates a `Context` below. The runner binary also parses the
//   wasm file and finds all functions that are named `__wbg_test_*`. The
//   generated file gathers up all these functions into an array and then
//   passes them to `Context` below. Note that these functions are passed as
//   *JS values*.
//
// * The runner then executes the JS file. This may be with node.js, it may
//   serve up files in a server and wait for the user, or it may serve up files
//   in a server and start headless testing.
//
// * Testing starts: all the modules are loaded using either ES imports or Node
//   `require` statements. Everything is loaded in JS now.
//
// * A `Context` is created. The `Context` is forwarded the CLI arguments of
//   the original `wasm-bindgen-test-runner` in an environment specific
//   fashion. This is used for test filters today.
//
// * The `Context::run` function is called. Again, the generated JS has
//   gathered all wasm tests to be executed into a list, and it's passed in
//   here. Again, it's very important that these functions are JS values, not
//   function pointers in Rust.
//
// * Next, `Context::run` will proceed to execute all of the functions. When a
//   function is executed we're invoking a JS function, which means we're
//   allowed to catch exceptions.
//   This is how we handle failing tests without aborting the entire process.
//
// * When a test executes, it's executing an entry point generated by
//   `#[wasm_bindgen_test]`. The test informs the `Context` of its name and
//   other metadata, and then `Context::execute` actually invokes the test
//   itself (which currently is a unit function).
//
// * Finally, after all tests are run, the `Context` prints out all the
//   results.
//
// ## Other various notes
//
// Phew, that was a lot! Some other various bits and pieces you may want to be
// aware of are throughout the code. These include things like how printing
// results is different in node vs a browser, or how we even detect if we're in
// node or a browser.
//
// Overall this is all somewhat in flux as it's pretty new, and feedback is
// always of course welcome!

use std::cell::{RefCell, Cell};
use std::fmt;
use std::mem;

use console_error_panic_hook;
use js_sys::{Array, Function};
use wasm_bindgen::prelude::*;

pub mod node;
pub mod browser;
pub mod detect;

/// Runtime test harness support instantiated in JS.
///
/// The node.js entry script instantiates a `Context` here which is used to
/// drive test execution.
#[wasm_bindgen]
pub struct Context {
    /// An optional filter used to restrict which tests are actually executed
    /// and which are ignored. This is passed via the `args` function which
    /// comes from the command line of `wasm-bindgen-test-runner`. Currently
    /// this is the only "CLI option".
    filter: Option<String>,

    /// The current test that is executing. If `None` no test is executing, if
    /// `Some` it's the name of the test.
    current_test: RefCell<Option<String>>,

    /// Counter of the number of tests that have succeeded.
    succeeded: Cell<usize>,

    /// Counter of the number of tests that have been ignored.
    ignored: Cell<usize>,

    /// A list of all tests which have failed. The first element of this pair
    /// is the name of the test that failed, and the second is all logging
    /// information (formatted) associated with the failure.
    failures: RefCell<Vec<(String, String)>>,

    /// Sink for `console.log` invocations when a test is running. This is
    /// filled in by the `Context::console_log` function below while a test is
    /// executing (aka while `current_test` above is `Some`).
    current_log: RefCell<String>,

    /// Like `current_log`, but for `console.error` invocations.
    current_error: RefCell<String>,

    /// Flag set as a test executes if it was actually ignored.
    ignore_this_test: Cell<bool>,

    /// How to actually format output, either a node.js or browser-specific
    /// implementation.
    formatter: Box<dyn Formatter>,
}

trait Formatter {
    fn writeln(&self, line: &str);
    fn log_start(&self, name: &str);
    fn log_success(&self);
    fn log_ignored(&self);
    fn log_failure(&self, err: JsValue) -> String;
}

#[wasm_bindgen]
extern {
    #[wasm_bindgen(js_namespace = console, js_name = log)]
    #[doc(hidden)]
    pub fn console_log(s: &str);

    // General-purpose conversion into a `String`.
    #[wasm_bindgen(js_name = String)]
    fn stringify(val: &JsValue) -> String;
}

/// Internal implementation detail of the `console_log!` macro.
pub fn log(args: &fmt::Arguments) {
    console_log(&args.to_string());
}

#[wasm_bindgen]
impl Context {
    /// Creates a new context ready to run tests.
    ///
    /// A `Context` is the main structure through which test execution is
    /// coordinated, and this will collect output and results for all executed
    /// tests.
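    ///
    /// A minimal sketch of how a `Context` is driven (illustrative only; in
    /// practice this is done from the JS glue emitted by
    /// `wasm-bindgen-test-runner`, and `cli_args`/`tests` stand in for the
    /// values it forwards):
    ///
    /// ```ignore
    /// let mut context = Context::new();
    /// // CLI arguments of `wasm-bindgen-test-runner`, e.g. a test filter.
    /// context.args(cli_args);
    /// // The `__wbg_test_*` exports, passed as JS `Function` values.
    /// let all_passed = context.run(tests);
    /// ```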
    #[wasm_bindgen(constructor)]
    pub fn new() -> Context {
        console_error_panic_hook::set_once();

        let formatter = match node::Node::new() {
            Some(node) => Box::new(node) as Box<dyn Formatter>,
            None => Box::new(browser::Browser::new()),
        };
        Context {
            filter: None,
            current_test: RefCell::new(None),
            succeeded: Cell::new(0),
            ignored: Cell::new(0),
            failures: RefCell::new(Vec::new()),
            current_log: RefCell::new(String::new()),
            current_error: RefCell::new(String::new()),
            ignore_this_test: Cell::new(false),
            formatter,
        }
    }

    /// Inform this context about runtime arguments passed to the test
    /// harness.
    ///
    /// Eventually this will be used to support flags, but for now it's just
    /// used to support test filters.
    pub fn args(&mut self, args: Vec<JsValue>) {
        // Here we want to reject all flags like `--foo` or `-f` as we don't
        // support anything, and we also only support at most one non-flag
        // argument as a test filter.
        //
        // Everything else is rejected.
        for arg in args {
            let arg = arg.as_string().unwrap();
            if arg.starts_with("-") {
                panic!("flag {} not supported", arg);
            } else if self.filter.is_some() {
                panic!("more than one filter argument cannot be passed");
            }
            self.filter = Some(arg);
        }
    }

    /// Executes a list of tests, returning whether all of them passed.
    ///
    /// This is the main entry point for executing tests. The tests passed in
    /// are JS `Function` objects that were plucked off the
    /// `WebAssembly.Instance` exports list. This allows us to invoke them but
    /// still catch JS exceptions.
    pub fn run(&self, tests: Vec<JsValue>) -> bool {
        let this = JsValue::null();

        // Each entry point has one argument, a raw pointer to this `Context`,
        // so build up the array of arguments we'll be passing to all the
        // functions.
        let args = Array::new();
        args.push(&JsValue::from(self as *const Context as u32));

        let noun = if tests.len() == 1 { "test" } else { "tests" };
        self.formatter
            .writeln(&format!("running {} {}", tests.len(), noun));
        self.formatter.writeln("");

        for test in tests {
            self.ignore_this_test.set(false);

            // Use `Function.apply` to catch any exceptions and otherwise
            // invoke the test.
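            //
            // Each `__wbg_test_*` export receives the raw `Context` pointer we
            // pushed into `args` above, casts it back to `*const Context`, and
            // calls `Context::execute` below with the test's name and body.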
            let test = Function::from(test);
            match test.apply(&this, &args) {
                Ok(_) => {
                    if self.ignore_this_test.get() {
                        self.log_ignore()
                    } else {
                        self.log_success()
                    }
                }
                Err(e) => self.log_failure(e),
            }
            drop(self.current_test.borrow_mut().take());
            *self.current_log.borrow_mut() = String::new();
            *self.current_error.borrow_mut() = String::new();
        }

        self.log_results();
        self.failures.borrow().len() == 0
    }

    fn log_start(&self, test: &str) {
        let mut current_test = self.current_test.borrow_mut();
        assert!(current_test.is_none());
        *current_test = Some(test.to_string());
        self.formatter.log_start(test);
    }

    fn log_success(&self) {
        self.formatter.log_success();
        self.succeeded.set(self.succeeded.get() + 1);
    }

    fn log_ignore(&self) {
        self.formatter.log_ignored();
        self.ignored.set(self.ignored.get() + 1);
    }

    fn log_failure(&self, err: JsValue) {
        let name = self.current_test.borrow().as_ref().unwrap().clone();
        let log = mem::replace(&mut *self.current_log.borrow_mut(), String::new());
        let error = mem::replace(&mut *self.current_error.borrow_mut(), String::new());
        let mut msg = String::new();
        if log.len() > 0 {
            msg.push_str("log output:\n");
            msg.push_str(&tab(&log));
            msg.push_str("\n");
        }
        if error.len() > 0 {
            msg.push_str("error output:\n");
            msg.push_str(&tab(&error));
            msg.push_str("\n");
        }
        msg.push_str("JS exception that was thrown:\n");
        msg.push_str(&tab(&self.formatter.log_failure(err)));
        self.failures.borrow_mut().push((name, msg));
    }

    fn log_results(&self) {
        let failures = self.failures.borrow();
        if failures.len() > 0 {
            self.formatter.writeln("\nfailures:\n");
            for (test, logs) in failures.iter() {
                let msg = format!("---- {} output ----\n{}", test, tab(logs));
                self.formatter.writeln(&msg);
            }
            self.formatter.writeln("failures:\n");
            for (test, _) in failures.iter() {
                self.formatter.writeln(&format!("    {}", test));
            }
        }

        self.formatter.writeln("");
        self.formatter.writeln(&format!(
            "test result: {}. \
             {} passed; \
             {} failed; \
             {} ignored\n",
            if failures.len() == 0 { "ok" } else { "FAILED" },
            self.succeeded.get(),
            failures.len(),
            self.ignored.get(),
        ));
    }

    /// Handler for `console.log` invocations.
    ///
    /// If a test is currently running it takes the `args` array, stringifies
    /// it, and appends it to the current output of the test. Otherwise it
    /// passes the arguments to the original `console.log` function, passed as
    /// `original`.
    pub fn console_log(&self, original: &Function, args: &Array) {
        self.log(original, args, &self.current_log)
    }

    /// Handler for `console.error` invocations.
    ///
    /// Works the same as `console_log` above.
    pub fn console_error(&self, original: &Function, args: &Array) {
        self.log(original, args, &self.current_error)
    }

    fn log(&self, orig: &Function, args: &Array, dst: &RefCell<String>) {
        if self.current_test.borrow().is_none() {
            drop(orig.apply(&JsValue::null(), args));
            return;
        }
        let mut log = dst.borrow_mut();
        args.for_each(&mut |val, idx, _array| {
            if idx != 0 {
                log.push_str(" ");
            }
            log.push_str(&stringify(&val));
        });
        log.push_str("\n");
    }
}

impl Context {
    /// Entry point for a test in wasm. The `#[wasm_bindgen_test]` macro
    /// generates invocations of this method.
    pub fn execute(&self, name: &str, f: impl FnOnce()) {
        self.log_start(name);
        if let Some(filter) = &self.filter {
            if !name.contains(filter) {
                self.ignore_this_test.set(true);
                return;
            }
        }
        f();
    }
}

fn tab(s: &str) -> String {
    let mut result = String::new();
    for line in s.lines() {
        result.push_str("    ");
        result.push_str(line);
        result.push_str("\n");
    }
    return result;
}
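
// For reference, the entry points that `#[wasm_bindgen_test]` generates and
// that `Context::run` receives as JS functions look roughly like the sketch
// below. This is an illustrative approximation, not the macro's literal
// expansion:
//
//     #[no_mangle]
//     pub extern "C" fn __wbg_test_my_test(cx: *const Context) {
//         let cx = unsafe { &*cx };
//         cx.execute("my_test", || my_test());
//     }
//
// In other words, each export hands its test's name and a closure over the
// user's test function to `Context::execute` above, which applies the test
// filter and then runs the body.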