Update with list IR from walrus

This commit updates `wasm-bindgen` to the latest version of `walrus`,
which switches all internal IR representations from a tree-based IR to a
list-based IR. For `wasm-bindgen` itself this is largely a cosmetic
change, but it requires a lot of changes to the threads/anyref passes.
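
For context, a minimal sketch of what building a function looks like under
the new list-based IR, using only builder calls that appear in the diff
below (`install_start` and the `delegate` function id are hypothetical
stand-ins for illustration, not code from this commit):

    use walrus::{FunctionBuilder, FunctionId, Module};

    // Build a `() -> ()` function with the list-based IR and install it as
    // the module's start function. Instructions are appended to an
    // instruction sequence in stack-machine order; there are no explicit
    // operand expression ids as in the old tree-based IR.
    fn install_start(module: &mut Module, delegate: FunctionId) {
        let mut builder = FunctionBuilder::new(&mut module.types, &[], &[]);
        builder.func_body().call(delegate);
        let id = builder.finish(Vec::new(), &mut module.funcs);
        module.start = Some(id);
    }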

This commit also updates our CI configuration to actually run all the
anyref tests on CI. This is done by downloading a nightly build of
node.js, which should remain available for a while until full support
makes its way into releases.
Alex Crichton
2019-08-12 10:49:00 -07:00
parent 1d0c333a2b
commit ad34fa29d8
8 changed files with 323 additions and 371 deletions


@@ -5,7 +5,7 @@ use std::mem;
 use failure::{bail, format_err, Error};
 use walrus::ir::Value;
-use walrus::{DataId, FunctionId, InitExpr, LocalFunction, ValType};
+use walrus::{DataId, FunctionId, InitExpr, ValType};
 use walrus::{ExportItem, GlobalId, GlobalKind, ImportKind, MemoryId, Module};
 
 const PAGE_SIZE: u32 = 1 << 16;
@@ -365,106 +365,86 @@ fn inject_start(
     use walrus::ir::*;
     assert!(stack_size % PAGE_SIZE == 0);
-    let mut builder = walrus::FunctionBuilder::new();
-    let mut exprs = Vec::new();
+    let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
     let local = module.locals.add(ValType::I32);
+    let mut body = builder.func_body();
-    let addr = builder.i32_const(addr as i32);
-    let one = builder.i32_const(1);
-    let thread_id = builder.atomic_rmw(
-        memory,
-        AtomicOp::Add,
-        AtomicWidth::I32,
-        MemArg {
-            align: 4,
-            offset: 0,
-        },
-        addr,
-        one,
-    );
-    let thread_id = builder.local_tee(local, thread_id);
-    let global_set = builder.global_set(globals.thread_id, thread_id);
-    exprs.push(global_set);
+    body.i32_const(addr as i32)
+        .i32_const(1)
+        .atomic_rmw(
+            memory,
+            AtomicOp::Add,
+            AtomicWidth::I32,
+            MemArg {
+                align: 4,
+                offset: 0,
+            },
+        )
+        .local_tee(local)
+        .global_set(globals.thread_id);
     // Perform an if/else based on whether we're the first thread or not. Our
     // thread ID will be zero if we're the first thread, otherwise it'll be
     // nonzero (assuming we don't overflow...)
     //
-    let thread_id_is_nonzero = builder.local_get(local);
+    body.local_get(local);
+    body.if_else(
+        Box::new([]),
+        Box::new([]),
+        // If our thread id is nonzero then we're the second or greater thread, so
+        // we give ourselves a stack via memory.grow and we update our stack
+        // pointer as the default stack pointer is surely wrong for us.
+        |body| {
+            if let Some(stack_pointer) = stack_pointer {
+                // local0 = grow_memory(stack_size);
+                body.i32_const((stack_size / PAGE_SIZE) as i32)
+                    .memory_grow(memory)
+                    .local_set(local);
-    // If our thread id is nonzero then we're the second or greater thread, so
-    // we give ourselves a stack via memory.grow and we update our stack
-    // pointer as the default stack pointer is surely wrong for us.
-    let mut block = builder.if_else_block(Box::new([]), Box::new([]));
-    if let Some(stack_pointer) = stack_pointer {
-        // local0 = grow_memory(stack_size);
-        let grow_amount = block.i32_const((stack_size / PAGE_SIZE) as i32);
-        let memory_growth = block.memory_grow(memory, grow_amount);
-        let set_local = block.local_set(local, memory_growth);
-        block.expr(set_local);
+                // if local0 == -1 then trap
+                body.block(Box::new([]), Box::new([]), |body| {
+                    let target = body.id();
+                    body.local_get(local)
+                        .i32_const(-1)
+                        .binop(BinaryOp::I32Ne)
+                        .br_if(target)
+                        .unreachable();
+                });
-        // if local0 == -1 then trap
-        let if_negative_trap = {
-            let mut block = block.block(Box::new([]), Box::new([]));
-            let lhs = block.local_get(local);
-            let rhs = block.i32_const(-1);
-            let condition = block.binop(BinaryOp::I32Ne, lhs, rhs);
-            let id = block.id();
-            let br_if = block.br_if(condition, id, Box::new([]));
-            block.expr(br_if);
-            let unreachable = block.unreachable();
-            block.expr(unreachable);
-            id
-        };
-        block.expr(if_negative_trap.into());
-        // stack_pointer = local0 + stack_size
-        let get_local = block.local_get(local);
-        let page_size = block.i32_const(PAGE_SIZE as i32);
-        let sp_base = block.binop(BinaryOp::I32Mul, get_local, page_size);
-        let stack_size = block.i32_const(stack_size as i32);
-        let sp = block.binop(BinaryOp::I32Add, sp_base, stack_size);
-        let set_stack_pointer = block.global_set(stack_pointer, sp);
-        block.expr(set_stack_pointer);
-    }
-    let if_nonzero_block = block.id();
-    drop(block);
-    // If the thread ID is zero then we can skip the update of the stack
-    // pointer as we know our stack pointer is valid. We need to initialize
-    // memory, however, so do that here.
-    let if_zero_block = {
-        let mut block = builder.if_else_block(Box::new([]), Box::new([]));
-        match &memory_init {
-            InitMemory::Segments(segments) => {
-                for segment in segments {
-                    let zero = block.i32_const(0);
-                    let offset = match segment.offset {
-                        InitExpr::Global(id) => block.global_get(id),
-                        InitExpr::Value(v) => block.const_(v),
-                    };
-                    let len = block.i32_const(segment.len as i32);
-                    let init = block.memory_init(memory, segment.id, offset, zero, len);
-                    block.expr(init);
-                    let drop = block.data_drop(segment.id);
-                    block.expr(drop);
-                }
-            }
+                // stack_pointer = local0 + stack_size
+                body.local_get(local)
+                    .i32_const(PAGE_SIZE as i32)
+                    .binop(BinaryOp::I32Mul)
+                    .i32_const(stack_size as i32)
+                    .binop(BinaryOp::I32Add)
+                    .global_set(stack_pointer);
+            }
+        },
+        // If the thread ID is zero then we can skip the update of the stack
+        // pointer as we know our stack pointer is valid. We need to initialize
+        // memory, however, so do that here.
+        |body| {
+            match &memory_init {
+                InitMemory::Segments(segments) => {
+                    for segment in segments {
+                        match segment.offset {
+                            InitExpr::Global(id) => body.global_get(id),
+                            InitExpr::Value(v) => body.const_(v),
+                        };
+                        body.i32_const(0)
+                            .i32_const(segment.len as i32)
+                            .memory_init(memory, segment.id)
+                            .data_drop(segment.id);
+                    }
+                }
+                InitMemory::Call {
+                    wasm_init_memory, ..
+                } => {
+                    body.call(*wasm_init_memory);
+                }
+            }
-            InitMemory::Call {
-                wasm_init_memory, ..
-            } => {
-                let call = block.call(*wasm_init_memory, Box::new([]));
-                block.expr(call);
-            }
-        }
-        block.id()
-    };
-    let block = builder.if_else(thread_id_is_nonzero, if_nonzero_block, if_zero_block);
-    exprs.push(block);
+        },
+    );
 
     // If we have these globals then we're using the new thread local system
     // implemented in LLVM, which means that `__wasm_init_tls` needs to be
@@ -477,21 +457,19 @@ fn inject_start(
     } = memory_init
     {
        let malloc = find_wbindgen_malloc(module)?;
-        let size = builder.i32_const(tls_size as i32);
-        let ptr = builder.call(malloc, Box::new([size]));
-        let block = builder.call(wasm_init_tls, Box::new([ptr]));
-        exprs.push(block);
+        body.i32_const(tls_size as i32)
+            .call(malloc)
+            .call(wasm_init_tls);
     }
 
     // If a start function previously existed we're done with our own
     // initialization so delegate to them now.
     if let Some(id) = module.start.take() {
-        exprs.push(builder.call(id, Box::new([])));
+        body.call(id);
     }
 
     // Finish off our newly generated function.
-    let ty = module.types.add(&[], &[]);
-    let id = builder.finish(ty, Vec::new(), exprs, module);
+    let id = builder.finish(Vec::new(), &mut module.funcs);
 
     // ... and finally flag it as the new start function
     module.start = Some(id);
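
A rough sketch of nested control flow under the list-based IR, mirroring
the `if_else` and `block` calls in the hunks above; the two `Box::new([])`
arguments are the (empty) parameter and result types this version of
walrus appears to take, and `build_select_flag` with its locals is a
hypothetical example, not code from this commit:

    use walrus::{FunctionBuilder, Module, ValType};

    // The arms of an `if` are populated through closures over a nested
    // instruction-sequence builder rather than by passing expression ids.
    fn build_select_flag(module: &mut Module) {
        let cond = module.locals.add(ValType::I32);
        let out = module.locals.add(ValType::I32);
        let mut builder = FunctionBuilder::new(&mut module.types, &[ValType::I32], &[]);
        let mut body = builder.func_body();
        body.local_get(cond);
        body.if_else(
            Box::new([]),
            Box::new([]),
            |then| {
                then.i32_const(1).local_set(out);
            },
            |else_| {
                else_.i32_const(0).local_set(out);
            },
        );
        builder.finish(vec![cond], &mut module.funcs);
    }
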
@@ -559,54 +537,39 @@ fn implement_thread_intrinsics(module: &mut Module, globals: &Globals) -> Result
     struct Visitor<'a> {
         map: &'a HashMap<FunctionId, Intrinsic>,
         globals: &'a Globals,
-        func: &'a mut LocalFunction,
     }
 
     module.funcs.iter_local_mut().for_each(|(_id, func)| {
-        let mut entry = func.entry_block();
-        Visitor {
-            map: &map,
-            globals,
-            func,
-        }
-        .visit_block_id_mut(&mut entry);
+        let entry = func.entry_block();
+        dfs_pre_order_mut(&mut Visitor { map: &map, globals }, func, entry);
     });
 
     impl VisitorMut for Visitor<'_> {
-        fn local_function_mut(&mut self) -> &mut LocalFunction {
-            self.func
-        }
-        fn visit_expr_mut(&mut self, expr: &mut Expr) {
-            let call = match expr {
-                Expr::Call(e) => e,
-                other => return other.visit_mut(self),
+        fn visit_instr_mut(&mut self, instr: &mut Instr) {
+            let call = match instr {
+                Instr::Call(e) => e,
+                _ => return,
             };
 
             match self.map.get(&call.func) {
                 Some(Intrinsic::GetThreadId) => {
-                    assert!(call.args.is_empty());
-                    *expr = GlobalGet {
+                    *instr = GlobalGet {
                         global: self.globals.thread_id,
                     }
                     .into();
                 }
                 Some(Intrinsic::GetTcb) => {
-                    assert!(call.args.is_empty());
-                    *expr = GlobalGet {
+                    *instr = GlobalGet {
                         global: self.globals.thread_tcb,
                     }
                     .into();
                 }
                 Some(Intrinsic::SetTcb) => {
-                    assert_eq!(call.args.len(), 1);
-                    call.args[0].visit_mut(self);
-                    *expr = GlobalSet {
+                    *instr = GlobalSet {
                         global: self.globals.thread_tcb,
-                        value: call.args[0],
                     }
                     .into();
                 }
-                None => call.visit_mut(self),
+                None => {}
             }
         }
     }
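
The visitor pattern above in isolation: a sketch of rewriting calls with
the walrus visitor API as it appears in this diff (the intrinsic/global
pairing is hypothetical). In the list-based IR a call's arguments live on
the operand stack, so a zero-argument call producing one value can be
swapped for a `global.get` with the same stack effect:

    use walrus::ir::{dfs_pre_order_mut, GlobalGet, Instr, VisitorMut};
    use walrus::{FunctionId, GlobalId, Module};

    struct Replace {
        intrinsic: FunctionId, // hypothetical `() -> i32` import to rewrite
        global: GlobalId,      // the global that takes its place
    }

    impl VisitorMut for Replace {
        fn visit_instr_mut(&mut self, instr: &mut Instr) {
            if let Instr::Call(call) = instr {
                if call.func == self.intrinsic {
                    // Same stack effect as the call: pushes one value.
                    *instr = GlobalGet { global: self.global }.into();
                }
            }
        }
    }

    fn rewrite_intrinsic(module: &mut Module, intrinsic: FunctionId, global: GlobalId) {
        module.funcs.iter_local_mut().for_each(|(_id, func)| {
            let entry = func.entry_block();
            dfs_pre_order_mut(&mut Replace { intrinsic, global }, func, entry);
        });
    }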