Merge remote-tracking branch 'origin/master' into feature/llvm-osr

losfair committed 2019-08-21 15:49:25 -07:00
28 changed files with 2937 additions and 400 deletions

Cargo.lock (generated): 506 changed lines

File diff suppressed because it is too large.


@@ -24,12 +24,12 @@ rayon = "1.1.0"
# Dependencies for caching.
[dependencies.serde]
version = "1.0.98"
version = "1.0.99"
features = ["rc"]
[dependencies.serde_derive]
version = "1.0.98"
[dependencies.serde_bytes]
version = "0.11.1"
version = "0.11.2"
[dependencies.serde-bench]
version = "0.0.7"


@@ -76,8 +76,7 @@ pub extern "C" fn nearbyintf64(x: f64) -> f64 {
}
}
/// A declaration for the stack probe function in Rust's standard library, for
/// catching callstack overflow.
extern "C" {
pub fn __rust_probestack();
}
// FIXME: Is there a replacement on AArch64?
#[cfg(all(target_os = "linux", target_arch = "aarch64"))]
#[no_mangle]
pub extern "C" fn __rust_probestack() {}


@@ -138,6 +138,14 @@ pub unsafe fn do_unwind(signum: i32, siginfo: *const c_void, ucontext: *const c_
longjmp(jmp_buf as *mut ::nix::libc::c_void, signum)
}
#[cfg(all(target_os = "linux", target_arch = "aarch64"))]
unsafe fn get_faulting_addr_and_ip(
_siginfo: *const c_void,
_ucontext: *const c_void,
) -> (*const c_void, *const c_void) {
(::std::ptr::null(), ::std::ptr::null())
}
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
unsafe fn get_faulting_addr_and_ip(
siginfo: *const c_void,
@@ -230,5 +238,6 @@ unsafe fn get_faulting_addr_and_ip(
#[cfg(not(any(
all(target_os = "macos", target_arch = "x86_64"),
all(target_os = "linux", target_arch = "x86_64"),
all(target_os = "linux", target_arch = "aarch64"),
)))]
compile_error!("This crate doesn't yet support compiling on operating systems other than linux and macos and architectures other than x86_64");
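The hunk above adds a no-op AArch64 stub and extends the supported-platform allowlist. For readers unfamiliar with this gating pattern, here is a minimal standalone Rust sketch (not part of the diff) of how per-platform cfg implementations pair with a compile_error! fallback so unsupported targets fail at build time with a clear message:

// Hypothetical example mirroring the pattern above: one definition per
// supported (os, arch) pair, and a hard build error everywhere else.
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
fn platform_name() -> &'static str {
    "linux/x86_64"
}

#[cfg(all(target_os = "linux", target_arch = "aarch64"))]
fn platform_name() -> &'static str {
    "linux/aarch64"
}

#[cfg(not(any(
    all(target_os = "linux", target_arch = "x86_64"),
    all(target_os = "linux", target_arch = "aarch64"),
)))]
compile_error!("unsupported platform; add a platform_name() for this target");

fn main() {
    println!("running on {}", platform_name());
}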


@@ -159,7 +159,7 @@ pub fn _gai_strerror(ctx: &mut Ctx, ecode: i32) -> i32 {
.unwrap()
};
for (i, byte) in bytes.iter().enumerate() {
writer[i].set(*byte as i8);
writer[i].set(*byte as _);
}
string_on_guest.offset() as _
@@ -283,7 +283,7 @@ pub fn _getaddrinfo(
.deref(ctx.memory(0), 0, str_size as _)
.unwrap();
for (i, b) in canonname_bytes.into_iter().enumerate() {
guest_canonname_writer[i].set(*b as i8)
guest_canonname_writer[i].set(*b as _)
}
guest_canonname
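The `as i8` to `as _` rewrites in this hunk, and the similar `as *const _` casts throughout the syscall hunks below, exist because libc's c_char is i8 on x86-64 but u8 on AArch64; letting the compiler infer the cast target builds on both. A small standalone illustration (hypothetical code, not from the diff):

use std::os::raw::c_char;

// Takes the platform's C character type; hard-coding *const i8 here
// would fail to compile on AArch64, where c_char is u8.
fn c_strlen(ptr: *const c_char) -> usize {
    unsafe { std::ffi::CStr::from_ptr(ptr).to_bytes().len() }
}

fn main() {
    let s = b"hello\0";
    // `as *const _` infers *const c_char on every target.
    println!("{}", c_strlen(s.as_ptr() as *const _));
}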


@@ -37,7 +37,7 @@ pub fn execvp(ctx: &mut Ctx, command_name_offset: u32, argv_offset: u32) -> i32
// construct raw pointers and hand them to `execvp`
let command_pointer = command_name_string.as_ptr() as *const i8;
let args_pointer = argv.as_ptr();
unsafe { libc_execvp(command_pointer, args_pointer) }
unsafe { libc_execvp(command_pointer as *const _, args_pointer as *const *const _) }
}
/// execl


@@ -23,7 +23,7 @@ pub fn printf(ctx: &mut Ctx, memory_offset: i32, extra: i32) -> i32 {
pub fn chroot(ctx: &mut Ctx, name_ptr: i32) -> i32 {
debug!("emscripten::chroot");
let name = emscripten_memory_pointer!(ctx.memory(0), name_ptr) as *const i8;
unsafe { _chroot(name) }
unsafe { _chroot(name as *const _) }
}
/// getpwuid


@@ -54,7 +54,7 @@ pub fn killpg(_ctx: &mut Ctx, _a: i32, _b: i32) -> i32 {
pub fn pathconf(ctx: &mut Ctx, path_ptr: i32, name: i32) -> i32 {
debug!("emscripten::pathconf");
let path = emscripten_memory_pointer!(ctx.memory(0), path_ptr) as *const i8;
unsafe { libc::pathconf(path, name).try_into().unwrap() }
unsafe { libc::pathconf(path as *const _, name).try_into().unwrap() }
}
#[cfg(not(unix))]


@@ -97,7 +97,7 @@ pub fn ___syscall6(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int
pub fn ___syscall12(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int {
debug!("emscripten::___syscall12 (chdir) {}", _which);
let path_ptr = varargs.get_str(ctx);
let real_path_owned = get_cstr_path(ctx, path_ptr);
let real_path_owned = get_cstr_path(ctx, path_ptr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -168,13 +168,13 @@ pub fn ___syscall38(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> i32 {
debug!("emscripten::___syscall38 (rename)");
let old_path = varargs.get_str(ctx);
let new_path = varargs.get_str(ctx);
let real_old_path_owned = get_cstr_path(ctx, old_path);
let real_old_path_owned = get_cstr_path(ctx, old_path as *const _);
let real_old_path = if let Some(ref rp) = real_old_path_owned {
rp.as_c_str().as_ptr()
} else {
old_path
};
let real_new_path_owned = get_cstr_path(ctx, new_path);
let real_new_path_owned = get_cstr_path(ctx, new_path as *const _);
let real_new_path = if let Some(ref rp) = real_new_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -194,7 +194,7 @@ pub fn ___syscall38(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> i32 {
pub fn ___syscall40(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int {
debug!("emscripten::___syscall40 (rmdir)");
let pathname_addr = varargs.get_str(ctx);
let real_path_owned = get_cstr_path(ctx, pathname_addr);
let real_path_owned = get_cstr_path(ctx, pathname_addr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -359,7 +359,7 @@ pub fn ___syscall183(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> i32
let buf_writer = buf_offset.deref(ctx.memory(0), 0, len as u32 + 1).unwrap();
for (i, byte) in path_string.bytes().enumerate() {
buf_writer[i].set(byte as i8);
buf_writer[i].set(byte as _);
}
buf_writer[len].set(0);
buf_offset.offset() as i32
@@ -535,7 +535,7 @@ pub fn ___syscall195(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_in
let pathname_addr = varargs.get_str(ctx);
let buf: u32 = varargs.get(ctx);
let real_path_owned = get_cstr_path(ctx, pathname_addr);
let real_path_owned = get_cstr_path(ctx, pathname_addr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {


@@ -146,7 +146,7 @@ pub fn ___syscall5(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int
let pathname_addr = varargs.get_str(ctx);
let flags: i32 = varargs.get(ctx);
let mode: u32 = varargs.get(ctx);
let real_path_owned = utils::get_cstr_path(ctx, pathname_addr);
let real_path_owned = utils::get_cstr_path(ctx, pathname_addr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -198,13 +198,13 @@ pub fn ___syscall83(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int
let path1 = varargs.get_str(ctx);
let path2 = varargs.get_str(ctx);
let real_path1_owned = utils::get_cstr_path(ctx, path1);
let real_path1_owned = utils::get_cstr_path(ctx, path1 as *const _);
let real_path1 = if let Some(ref rp) = real_path1_owned {
rp.as_c_str().as_ptr()
} else {
path1
};
let real_path2_owned = utils::get_cstr_path(ctx, path2);
let real_path2_owned = utils::get_cstr_path(ctx, path2 as *const _);
let real_path2 = if let Some(ref rp) = real_path2_owned {
rp.as_c_str().as_ptr()
} else {
@@ -227,7 +227,7 @@ pub fn ___syscall85(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> i32 {
let buf = varargs.get_str(ctx);
// let buf_addr: i32 = varargs.get(ctx);
let buf_size: i32 = varargs.get(ctx);
let real_path_owned = get_cstr_path(ctx, pathname_addr);
let real_path_owned = get_cstr_path(ctx, pathname_addr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -266,7 +266,7 @@ pub fn ___syscall194(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_in
pub fn ___syscall198(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int {
debug!("emscripten::___syscall198 (lchown) {}", _which);
let path_ptr = varargs.get_str(ctx);
let real_path_owned = utils::get_cstr_path(ctx, path_ptr);
let real_path_owned = utils::get_cstr_path(ctx, path_ptr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -307,7 +307,7 @@ pub fn ___syscall212(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_in
debug!("emscripten::___syscall212 (chown) {}", _which);
let pathname_addr = varargs.get_str(ctx);
let real_path_owned = utils::get_cstr_path(ctx, pathname_addr);
let real_path_owned = utils::get_cstr_path(ctx, pathname_addr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -336,7 +336,7 @@ pub fn ___syscall219(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_in
pub fn ___syscall33(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int {
debug!("emscripten::___syscall33 (access) {}", _which);
let path = varargs.get_str(ctx);
let real_path_owned = utils::get_cstr_path(ctx, path);
let real_path_owned = utils::get_cstr_path(ctx, path as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -364,7 +364,7 @@ pub fn ___syscall34(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int
pub fn ___syscall39(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_int {
debug!("emscripten::___syscall39 (mkdir) {}", _which);
let pathname_addr = varargs.get_str(ctx);
let real_path_owned = utils::get_cstr_path(ctx, pathname_addr);
let real_path_owned = utils::get_cstr_path(ctx, pathname_addr as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -986,7 +986,7 @@ pub fn ___syscall122(ctx: &mut Ctx, _which: c_int, mut varargs: VarArgs) -> c_in
pub fn ___syscall196(ctx: &mut Ctx, _which: i32, mut varargs: VarArgs) -> i32 {
debug!("emscripten::___syscall196 (lstat64) {}", _which);
let path = varargs.get_str(ctx);
let real_path_owned = utils::get_cstr_path(ctx, path);
let real_path_owned = utils::get_cstr_path(ctx, path as *const _);
let real_path = if let Some(ref rp) = real_path_owned {
rp.as_c_str().as_ptr()
} else {
@@ -1063,7 +1063,7 @@ pub fn ___syscall220(ctx: &mut Ctx, _which: i32, mut varargs: VarArgs) -> i32 {
let upper_bound = std::cmp::min((*dirent).d_reclen, 255) as usize;
let mut i = 0;
while i < upper_bound {
*(dirp.add(pos + 11 + i) as *mut i8) = (*dirent).d_name[i];
*(dirp.add(pos + 11 + i) as *mut i8) = (*dirent).d_name[i] as _;
i += 1;
}
// We set the termination string char


@@ -236,7 +236,8 @@ pub fn read_string_from_wasm(memory: &Memory, offset: u32) -> String {
pub fn get_cstr_path(ctx: &mut Ctx, path: *const i8) -> Option<std::ffi::CString> {
use std::collections::VecDeque;
let path_str = unsafe { std::ffi::CStr::from_ptr(path).to_str().unwrap() }.to_string();
let path_str =
unsafe { std::ffi::CStr::from_ptr(path as *const _).to_str().unwrap() }.to_string();
let data = get_emscripten_data(ctx);
let path = PathBuf::from(path_str);
let mut prefix_added = false;

File diff suppressed because it is too large.


@@ -131,6 +131,7 @@ pub struct Intrinsics {
pub trap_call_indirect_oob: BasicValueEnum,
pub trap_memory_oob: BasicValueEnum,
pub trap_illegal_arithmetic: BasicValueEnum,
pub trap_misaligned_atomic: BasicValueEnum,
// VM intrinsics.
pub memory_grow_dynamic_local: FunctionValue,
@@ -460,6 +461,7 @@ impl Intrinsics {
trap_call_indirect_oob: i32_ty.const_int(3, false).as_basic_value_enum(),
trap_memory_oob: i32_ty.const_int(2, false).as_basic_value_enum(),
trap_illegal_arithmetic: i32_ty.const_int(4, false).as_basic_value_enum(),
trap_misaligned_atomic: i32_ty.const_int(5, false).as_basic_value_enum(),
// VM intrinsics.
memory_grow_dynamic_local: module.add_function(


@@ -26,13 +26,13 @@ features = ["serde-1"]
# Dependencies for caching.
[dependencies.serde]
version = "1.0.98"
version = "1.0.99"
# This feature is required for serde to support serializing/deserializing reference counted pointers (e.g. Rc and Arc).
features = ["rc"]
[dependencies.serde_derive]
version = "1.0.98"
[dependencies.serde_bytes]
version = "0.11.1"
version = "0.11.2"
[dependencies.serde-bench]
version = "0.0.7"
[dependencies.blake2b_simd]


@@ -114,6 +114,7 @@ impl Default for MemoryBoundCheckMode {
#[derive(Debug, Default)]
pub struct Features {
pub simd: bool,
pub threads: bool,
}
/// Configuration data for the compiler


@@ -140,7 +140,7 @@ impl<
pub fn validating_parser_config(features: &Features) -> wasmparser::ValidatingParserConfig {
wasmparser::ValidatingParserConfig {
operator_config: wasmparser::OperatorValidatorConfig {
enable_threads: false,
enable_threads: features.threads,
enable_reference_types: false,
enable_simd: features.simd,
enable_bulk_memory: false,


@@ -142,7 +142,7 @@ pub fn validate_and_report_errors_with_features(
enable_bulk_memory: false,
enable_multi_value: false,
enable_reference_types: false,
enable_threads: false,
enable_threads: features.threads,
},
mutable_global_imports: true,
};


@@ -12,12 +12,15 @@ use std::{
cell::{Cell, RefCell},
fmt, mem,
rc::Rc,
sync::Arc,
};
pub use self::dynamic::DynamicMemory;
pub use self::static_::{SharedStaticMemory, StaticMemory};
pub use self::static_::StaticMemory;
pub use self::view::{Atomically, MemoryView};
use parking_lot::Mutex;
mod dynamic;
pub mod ptr;
mod static_;
@@ -151,20 +154,10 @@ impl Memory {
unsafe { MemoryView::new(base as _, length as u32) }
}
/// Convert this memory to a shared memory if the shared flag
/// is present in the description used to create it.
pub fn shared(self) -> Option<SharedMemory> {
if self.desc.shared {
Some(SharedMemory { desc: self.desc })
} else {
None
}
}
pub(crate) fn vm_local_memory(&self) -> *mut vm::LocalMemory {
match &self.variant {
MemoryVariant::Unshared(unshared_mem) => unshared_mem.vm_local_memory(),
MemoryVariant::Shared(_) => unimplemented!(),
MemoryVariant::Shared(shared_mem) => shared_mem.vm_local_memory(),
}
}
}
@@ -241,7 +234,7 @@ impl UnsharedMemory {
MemoryType::SharedStatic => panic!("attempting to create shared unshared memory"),
};
Ok(UnsharedMemory {
Ok(Self {
internal: Rc::new(UnsharedMemoryInternal {
storage: RefCell::new(storage),
local: Cell::new(local),
@@ -289,27 +282,56 @@ impl Clone for UnsharedMemory {
}
pub struct SharedMemory {
#[allow(dead_code)]
desc: MemoryDescriptor,
internal: Arc<SharedMemoryInternal>,
}
pub struct SharedMemoryInternal {
memory: RefCell<Box<StaticMemory>>,
local: Cell<vm::LocalMemory>,
lock: Mutex<()>,
}
impl SharedMemory {
fn new(desc: MemoryDescriptor) -> Result<Self, CreationError> {
Ok(Self { desc })
let mut local = vm::LocalMemory {
base: std::ptr::null_mut(),
bound: 0,
memory: std::ptr::null_mut(),
};
let memory = StaticMemory::new(desc, &mut local)?;
Ok(Self {
internal: Arc::new(SharedMemoryInternal {
memory: RefCell::new(memory),
local: Cell::new(local),
lock: Mutex::new(()),
}),
})
}
pub fn grow(&self, _delta: Pages) -> Result<Pages, GrowError> {
unimplemented!()
pub fn grow(&self, delta: Pages) -> Result<Pages, GrowError> {
let _guard = self.internal.lock.lock();
let mut local = self.internal.local.get();
let pages = self.internal.memory.borrow_mut().grow(delta, &mut local);
pages
}
pub fn size(&self) -> Pages {
unimplemented!()
let _guard = self.internal.lock.lock();
self.internal.memory.borrow_mut().size()
}
pub(crate) fn vm_local_memory(&self) -> *mut vm::LocalMemory {
self.internal.local.as_ptr()
}
}
impl Clone for SharedMemory {
fn clone(&self) -> Self {
unimplemented!()
SharedMemory {
internal: Arc::clone(&self.internal),
}
}
}
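As a rough standalone sketch (assumed, not from the codebase) of the design the new SharedMemory adopts above: clones share one Arc-backed allocation, and a mutex serializes grow/size so concurrent callers observe a consistent page count:

use std::sync::{Arc, Mutex};

struct SharedPagesInternal {
    // Stands in for the StaticMemory + vm::LocalMemory pair above.
    pages: Mutex<usize>,
}

#[derive(Clone)]
struct SharedPages {
    internal: Arc<SharedPagesInternal>,
}

impl SharedPages {
    fn new(initial: usize) -> Self {
        SharedPages {
            internal: Arc::new(SharedPagesInternal {
                pages: Mutex::new(initial),
            }),
        }
    }

    // Like `Memory::grow`, returns the size before growing.
    fn grow(&self, delta: usize) -> usize {
        let mut pages = self.internal.pages.lock().unwrap();
        let old = *pages;
        *pages += delta;
        old
    }

    fn size(&self) -> usize {
        *self.internal.pages.lock().unwrap()
    }
}

fn main() {
    let a = SharedPages::new(1);
    let b = a.clone();
    a.grow(2);
    // Growth through one handle is visible through the other.
    assert_eq!(b.size(), 3);
    println!("pages: {}", b.size());
}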


@@ -1,12 +1,10 @@
use crate::error::GrowError;
use crate::{
error::CreationError,
memory::static_::{SAFE_STATIC_GUARD_SIZE, SAFE_STATIC_HEAP_SIZE},
sys,
types::MemoryDescriptor,
units::Pages,
vm,
};
use crate::{error::CreationError, sys, types::MemoryDescriptor, units::Pages, vm};
#[doc(hidden)]
pub const SAFE_STATIC_HEAP_SIZE: usize = 1 << 32; // 4 GiB
#[doc(hidden)]
pub const SAFE_STATIC_GUARD_SIZE: usize = 1 << 31; // 2 GiB
/// This is an internal-only api.
///


@@ -1,10 +0,0 @@
#[doc(hidden)]
pub const SAFE_STATIC_HEAP_SIZE: usize = 1 << 32; // 4 GiB
#[doc(hidden)]
pub const SAFE_STATIC_GUARD_SIZE: usize = 1 << 31; // 2 GiB
mod shared;
mod unshared;
pub use self::shared::SharedStaticMemory;
pub use self::unshared::StaticMemory;


@@ -1,11 +0,0 @@
use crate::sys;
use parking_lot::Mutex;
use std::sync::atomic::AtomicUsize;
// Remove this attribute once this is used.
#[allow(dead_code)]
pub struct SharedStaticMemory {
memory: sys::Memory,
current: AtomicUsize,
lock: Mutex<()>,
}


@@ -23,6 +23,7 @@ pub enum WasmTrapInfo {
MemoryOutOfBounds = 2,
CallIndirectOOB = 3,
IllegalArithmetic = 4,
MisalignedAtomicAccess = 5,
Unknown,
}
@@ -39,6 +40,7 @@ impl fmt::Display for WasmTrapInfo {
WasmTrapInfo::MemoryOutOfBounds => "memory out-of-bounds access",
WasmTrapInfo::CallIndirectOOB => "`call_indirect` out-of-bounds",
WasmTrapInfo::IllegalArithmetic => "illegal arithmetic operation",
WasmTrapInfo::MisalignedAtomicAccess => "misaligned atomic access",
WasmTrapInfo::Unknown => "unknown",
}
)
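Tying this to the LLVM change earlier in the diff: the backend now passes trap code 5 (trap_misaligned_atomic), and the runtime renders it through this enum. A hypothetical standalone decoder, using only the codes and messages visible in this diff:

// Hypothetical helper, not in the diff: decode the numeric trap codes
// the LLVM intrinsics emit into the messages shown above.
fn trap_message(code: u64) -> &'static str {
    match code {
        2 => "memory out-of-bounds access",
        3 => "`call_indirect` out-of-bounds",
        4 => "illegal arithmetic operation",
        5 => "misaligned atomic access",
        _ => "unknown",
    }
}

fn main() {
    assert_eq!(trap_message(5), "misaligned atomic access");
    println!("{}", trap_message(5));
}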


@@ -208,7 +208,7 @@ fn get_intrinsics_for_module(m: &ModuleInfo) -> *const Intrinsics {
match mem_desc.memory_type() {
MemoryType::Dynamic => &INTRINSICS_LOCAL_DYNAMIC_MEMORY,
MemoryType::Static => &INTRINSICS_LOCAL_STATIC_MEMORY,
MemoryType::SharedStatic => unimplemented!(),
MemoryType::SharedStatic => &INTRINSICS_LOCAL_STATIC_MEMORY,
}
}
LocalOrImport::Import(import_mem_index) => {
@@ -216,7 +216,7 @@ fn get_intrinsics_for_module(m: &ModuleInfo) -> *const Intrinsics {
match mem_desc.memory_type() {
MemoryType::Dynamic => &INTRINSICS_IMPORTED_DYNAMIC_MEMORY,
MemoryType::Static => &INTRINSICS_IMPORTED_STATIC_MEMORY,
MemoryType::SharedStatic => unimplemented!(),
MemoryType::SharedStatic => &INTRINSICS_IMPORTED_STATIC_MEMORY,
}
}
}

lib/spectests/spectests/atomic.wast (vendored, new file): 473 lines

@@ -0,0 +1,473 @@
;; atomic.wast from WebAssembly test suite.
;; https://github.com/WebAssembly/testsuite/blob/master/LICENSE
;;
;; Modified by Wasmer to parse with the wabt spec test parser, replacing
;; '_u.add' with '.add_u' and similarly for 'sub', 'and', 'or', 'xor', 'xchg'
;; and 'cmpxchg'.
;; atomic operations
(module
(memory 1 1 shared)
(func (export "init") (param $value i64) (i64.store (i32.const 0) (local.get $value)))
(func (export "i32.atomic.load") (param $addr i32) (result i32) (i32.atomic.load (local.get $addr)))
(func (export "i64.atomic.load") (param $addr i32) (result i64) (i64.atomic.load (local.get $addr)))
(func (export "i32.atomic.load8_u") (param $addr i32) (result i32) (i32.atomic.load8_u (local.get $addr)))
(func (export "i32.atomic.load16_u") (param $addr i32) (result i32) (i32.atomic.load16_u (local.get $addr)))
(func (export "i64.atomic.load8_u") (param $addr i32) (result i64) (i64.atomic.load8_u (local.get $addr)))
(func (export "i64.atomic.load16_u") (param $addr i32) (result i64) (i64.atomic.load16_u (local.get $addr)))
(func (export "i64.atomic.load32_u") (param $addr i32) (result i64) (i64.atomic.load32_u (local.get $addr)))
(func (export "i32.atomic.store") (param $addr i32) (param $value i32) (i32.atomic.store (local.get $addr) (local.get $value)))
(func (export "i64.atomic.store") (param $addr i32) (param $value i64) (i64.atomic.store (local.get $addr) (local.get $value)))
(func (export "i32.atomic.store8") (param $addr i32) (param $value i32) (i32.atomic.store8 (local.get $addr) (local.get $value)))
(func (export "i32.atomic.store16") (param $addr i32) (param $value i32) (i32.atomic.store16 (local.get $addr) (local.get $value)))
(func (export "i64.atomic.store8") (param $addr i32) (param $value i64) (i64.atomic.store8 (local.get $addr) (local.get $value)))
(func (export "i64.atomic.store16") (param $addr i32) (param $value i64) (i64.atomic.store16 (local.get $addr) (local.get $value)))
(func (export "i64.atomic.store32") (param $addr i32) (param $value i64) (i64.atomic.store32 (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.add") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw.add (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw.add") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw.add (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw8.add_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw8.add_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw16.add_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw16.add_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw8.add_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw8.add_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw16.add_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw16.add_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw32.add_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw32.add_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.sub") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw.sub (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw.sub") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw.sub (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw8.sub_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw8.sub_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw16.sub_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw16.sub_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw8.sub_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw8.sub_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw16.sub_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw16.sub_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw32.sub_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw32.sub_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.and") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw.and (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw.and") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw.and (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw8.and_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw8.and_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw16.and_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw16.and_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw8.and_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw8.and_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw16.and_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw16.and_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw32.and_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw32.and_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.or") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw.or (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw.or") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw.or (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw8.or_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw8.or_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw16.or_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw16.or_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw8.or_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw8.or_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw16.or_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw16.or_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw32.or_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw32.or_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.xor") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw.xor (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw.xor") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw.xor (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw8.xor_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw8.xor_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw16.xor_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw16.xor_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw8.xor_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw8.xor_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw16.xor_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw16.xor_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw32.xor_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw32.xor_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.xchg") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw.xchg (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw.xchg") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw.xchg (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw8.xchg_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw8.xchg_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw16.xchg_u") (param $addr i32) (param $value i32) (result i32) (i32.atomic.rmw16.xchg_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw8.xchg_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw8.xchg_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw16.xchg_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw16.xchg_u (local.get $addr) (local.get $value)))
(func (export "i64.atomic.rmw32.xchg_u") (param $addr i32) (param $value i64) (result i64) (i64.atomic.rmw32.xchg_u (local.get $addr) (local.get $value)))
(func (export "i32.atomic.rmw.cmpxchg") (param $addr i32) (param $expected i32) (param $value i32) (result i32) (i32.atomic.rmw.cmpxchg (local.get $addr) (local.get $expected) (local.get $value)))
(func (export "i64.atomic.rmw.cmpxchg") (param $addr i32) (param $expected i64) (param $value i64) (result i64) (i64.atomic.rmw.cmpxchg (local.get $addr) (local.get $expected) (local.get $value)))
(func (export "i32.atomic.rmw8.cmpxchg_u") (param $addr i32) (param $expected i32) (param $value i32) (result i32) (i32.atomic.rmw8.cmpxchg_u (local.get $addr) (local.get $expected) (local.get $value)))
(func (export "i32.atomic.rmw16.cmpxchg_u") (param $addr i32) (param $expected i32) (param $value i32) (result i32) (i32.atomic.rmw16.cmpxchg_u (local.get $addr) (local.get $expected) (local.get $value)))
(func (export "i64.atomic.rmw8.cmpxchg_u") (param $addr i32) (param $expected i64) (param $value i64) (result i64) (i64.atomic.rmw8.cmpxchg_u (local.get $addr) (local.get $expected) (local.get $value)))
(func (export "i64.atomic.rmw16.cmpxchg_u") (param $addr i32) (param $expected i64) (param $value i64) (result i64) (i64.atomic.rmw16.cmpxchg_u (local.get $addr) (local.get $expected) (local.get $value)))
(func (export "i64.atomic.rmw32.cmpxchg_u") (param $addr i32) (param $expected i64) (param $value i64) (result i64) (i64.atomic.rmw32.cmpxchg_u (local.get $addr) (local.get $expected) (local.get $value)))
)
;; *.atomic.load*
(invoke "init" (i64.const 0x0706050403020100))
(assert_return (invoke "i32.atomic.load" (i32.const 0)) (i32.const 0x03020100))
(assert_return (invoke "i32.atomic.load" (i32.const 4)) (i32.const 0x07060504))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0706050403020100))
(assert_return (invoke "i32.atomic.load8_u" (i32.const 0)) (i32.const 0x00))
(assert_return (invoke "i32.atomic.load8_u" (i32.const 5)) (i32.const 0x05))
(assert_return (invoke "i32.atomic.load16_u" (i32.const 0)) (i32.const 0x0100))
(assert_return (invoke "i32.atomic.load16_u" (i32.const 6)) (i32.const 0x0706))
(assert_return (invoke "i64.atomic.load8_u" (i32.const 0)) (i64.const 0x00))
(assert_return (invoke "i64.atomic.load8_u" (i32.const 5)) (i64.const 0x05))
(assert_return (invoke "i64.atomic.load16_u" (i32.const 0)) (i64.const 0x0100))
(assert_return (invoke "i64.atomic.load16_u" (i32.const 6)) (i64.const 0x0706))
(assert_return (invoke "i64.atomic.load32_u" (i32.const 0)) (i64.const 0x03020100))
(assert_return (invoke "i64.atomic.load32_u" (i32.const 4)) (i64.const 0x07060504))
;; *.atomic.store*
(invoke "init" (i64.const 0x0000000000000000))
(assert_return (invoke "i32.atomic.store" (i32.const 0) (i32.const 0xffeeddcc)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x00000000ffeeddcc))
(assert_return (invoke "i64.atomic.store" (i32.const 0) (i64.const 0x0123456789abcdef)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0123456789abcdef))
(assert_return (invoke "i32.atomic.store8" (i32.const 1) (i32.const 0x42)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0123456789ab42ef))
(assert_return (invoke "i32.atomic.store16" (i32.const 4) (i32.const 0x8844)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0123884489ab42ef))
(assert_return (invoke "i64.atomic.store8" (i32.const 1) (i64.const 0x99)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0123884489ab99ef))
(assert_return (invoke "i64.atomic.store16" (i32.const 4) (i64.const 0xcafe)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0123cafe89ab99ef))
(assert_return (invoke "i64.atomic.store32" (i32.const 4) (i64.const 0xdeadbeef)))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0xdeadbeef89ab99ef))
;; *.atomic.rmw*.add
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.add" (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111123456789))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.add" (i32.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1212121213131313))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.add_u" (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111de))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.add_u" (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111dc0f))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.add_u" (i32.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111153))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.add_u" (i32.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111d000))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.add_u" (i32.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111dbccb7f6))
;; *.atomic.rmw*.sub
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.sub" (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111fedcba99))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.sub" (i32.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x101010100f0f0f0f))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.sub_u" (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111144))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.sub_u" (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111114613))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.sub_u" (i32.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111cf))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.sub_u" (i32.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111115222))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.sub_u" (i32.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111146556a2c))
;; *.atomic.rmw*.and
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.and" (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111110101010))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.and" (i32.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0101010100000000))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.and_u" (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111101))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.and_u" (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111110010))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.and_u" (i32.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111100))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.and_u" (i32.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111001))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.and_u" (i32.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111100110001))
;; *.atomic.rmw*.or
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.or" (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111113355779))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.or" (i32.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111113131313))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.or_u" (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111dd))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.or_u" (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111dbff))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.or_u" (i32.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111153))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.or_u" (i32.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111bfff))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.or_u" (i32.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111dbbbb7f5))
;; *.atomic.rmw*.xor
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.xor" (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111103254769))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.xor" (i32.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1010101013131313))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.xor_u" (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111dc))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.xor_u" (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111dbef))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.xor_u" (i32.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111153))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.xor_u" (i32.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111affe))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.xor_u" (i32.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111dbaab7f4))
;; *.atomic.rmw*.xchg
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.xchg" (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111112345678))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.xchg" (i32.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0101010102020202))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.xchg_u" (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111cd))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.xchg_u" (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111cafe))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.xchg_u" (i32.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111142))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.xchg_u" (i32.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111beef))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.xchg_u" (i32.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111cabba6e5))
;; *.atomic.rmw*.cmpxchg (compare false)
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.cmpxchg" (i32.const 0) (i32.const 0) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.cmpxchg" (i32.const 0) (i64.const 0) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.cmpxchg_u" (i32.const 0) (i32.const 0) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 0) (i32.const 0) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 0) (i64.const 0) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111111))
;; *.atomic.rmw*.cmpxchg (compare true)
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw.cmpxchg" (i32.const 0) (i32.const 0x11111111) (i32.const 0x12345678)) (i32.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111112345678))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw.cmpxchg" (i32.const 0) (i64.const 0x1111111111111111) (i64.const 0x0101010102020202)) (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x0101010102020202))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw8.cmpxchg_u" (i32.const 0) (i32.const 0x11) (i32.const 0xcdcdcdcd)) (i32.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111111111cd))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 0) (i32.const 0x1111) (i32.const 0xcafecafe)) (i32.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111cafe))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw8.cmpxchg_u" (i32.const 0) (i64.const 0x11) (i64.const 0x4242424242424242)) (i64.const 0x11))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x1111111111111142))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 0) (i64.const 0x1111) (i64.const 0xbeefbeefbeefbeef)) (i64.const 0x1111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x111111111111beef))
(invoke "init" (i64.const 0x1111111111111111))
(assert_return (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 0) (i64.const 0x11111111) (i64.const 0xcabba6e5cabba6e5)) (i64.const 0x11111111))
(assert_return (invoke "i64.atomic.load" (i32.const 0)) (i64.const 0x11111111cabba6e5))
;; unaligned accesses
(assert_trap (invoke "i32.atomic.load" (i32.const 1)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.load" (i32.const 1)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.load16_u" (i32.const 1)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.load16_u" (i32.const 1)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.load32_u" (i32.const 1)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.store" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.store" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.store16" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.store16" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.store32" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.add" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.add" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.add_u" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.add_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.add_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.sub" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.sub" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.sub_u" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.sub_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.sub_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.and" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.and" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.and_u" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.and_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.and_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.or" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.or" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.or_u" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.or_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.or_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.xor" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.xor" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.xor_u" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.xor_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.xor_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.xchg" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.xchg" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.xchg_u" (i32.const 1) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.xchg_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.xchg_u" (i32.const 1) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw.cmpxchg" (i32.const 1) (i32.const 0) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw.cmpxchg" (i32.const 1) (i64.const 0) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i32.atomic.rmw16.cmpxchg_u" (i32.const 1) (i32.const 0) (i32.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw16.cmpxchg_u" (i32.const 1) (i64.const 0) (i64.const 0)) "unaligned atomic")
(assert_trap (invoke "i64.atomic.rmw32.cmpxchg_u" (i32.const 1) (i64.const 0) (i64.const 0)) "unaligned atomic")
;; unshared memory
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.load (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.load (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.load16_u (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.load16_u (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.load32_u (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (i32.atomic.store (i32.const 0) (i32.const 0)))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (i64.atomic.store (i32.const 0) (i64.const 0)))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (i32.atomic.store16 (i32.const 0) (i32.const 0)))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (i64.atomic.store16 (i32.const 0) (i64.const 0)))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (i64.atomic.store32 (i32.const 0) (i64.const 0)))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.add (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.add (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.add_u (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.add_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.add_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.sub (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.sub (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.sub_u (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.sub_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.sub_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.and (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.and (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.and_u (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.and_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.and_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.or (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.or (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.or_u (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.or_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.or_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.xor (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.xor (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.xor_u (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.xor_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.xor_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.xchg (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.xchg (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.xchg_u (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.xchg_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.xchg_u (i32.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw.cmpxchg (i32.const 0) (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw.cmpxchg (i32.const 0) (i64.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i32.atomic.rmw16.cmpxchg_u (i32.const 0) (i32.const 0) (i32.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw16.cmpxchg_u (i32.const 0) (i64.const 0) (i64.const 0))))) "atomic accesses require shared memory")
(assert_invalid (module (memory 1 1) (func (drop (i64.atomic.rmw32.cmpxchg_u (i32.const 0) (i64.const 0) (i64.const 0))))) "atomic accesses require shared memory")
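A point the assertions in this test file rely on: every *.atomic.rmw* operator returns the value the memory held before the operation, the same convention as Rust's fetch_* atomics. For example, the first rmw.add case above maps onto this standalone Rust analogue (illustration only, not from the diff):

use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    // Mirrors the wast case: init 0x11111111, then i32.atomic.rmw.add
    // with 0x12345678 returns the old value, and memory holds the sum.
    let x = AtomicU32::new(0x1111_1111);
    let old = x.fetch_add(0x1234_5678, Ordering::SeqCst);
    assert_eq!(old, 0x1111_1111);
    assert_eq!(x.load(Ordering::SeqCst), 0x2345_6789);
}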


@@ -589,9 +589,7 @@
;; Test that LLVM undef isn't introduced by SIMD shifts greater than the scalar width.
(module
;; wabt says "memories may not be shared"
;; (memory 1 1 shared)
(memory 1 1)
(memory 1 1 shared)
(func (export "test-simd-shift-mask") (param $v v128) (result i32)
(block $0
(block $1


@@ -17,6 +17,7 @@
#
# Cranelift
clif:skip:atomic.wast:* # Threads not implemented
clif:skip:simd.wast:* # SIMD not implemented
clif:skip:simd_binaryen.wast:* # SIMD not implemented
@@ -860,6 +861,7 @@ llvm:skip:unreachable.wast:*:windows
llvm:skip:unwind.wast:*:windows
# Singlepass
singlepass:skip:atomic.wast:* # Threads not implemented
singlepass:skip:simd.wast:* # SIMD not implemented
singlepass:skip:simd_binaryen.wast:* # SIMD not implemented


@@ -167,6 +167,7 @@ mod tests {
let mut features = wabt::Features::new();
features.enable_simd();
features.enable_threads();
let mut parser: ScriptParser =
ScriptParser::from_source_and_name_with_features(&source, filename, features)
.expect(&format!("Failed to parse script {}", &filename));
@@ -203,7 +204,10 @@ mod tests {
let spectest_import_object =
get_spectest_import_object(&registered_modules);
let config = CompilerConfig {
features: Features { simd: true },
features: Features {
simd: true,
threads: true,
},
..Default::default()
};
let module = wasmer_runtime_core::compile_with_config(
@@ -630,7 +634,10 @@ mod tests {
// println!("AssertInvalid");
let result = panic::catch_unwind(|| {
let config = CompilerConfig {
features: Features { simd: true },
features: Features {
simd: true,
threads: true,
},
..Default::default()
};
wasmer_runtime_core::compile_with_config(
@@ -681,7 +688,10 @@ mod tests {
let result = panic::catch_unwind(|| {
let config = CompilerConfig {
features: Features { simd: true },
features: Features {
simd: true,
threads: true,
},
..Default::default()
};
wasmer_runtime_core::compile_with_config(
@@ -798,7 +808,10 @@ mod tests {
let spectest_import_object =
get_spectest_import_object(&registered_modules);
let config = CompilerConfig {
features: Features { simd: true },
features: Features {
simd: true,
threads: true,
},
..Default::default()
};
let module = wasmer_runtime_core::compile_with_config(


@@ -87,6 +87,10 @@ struct PrestandardFeatures {
#[structopt(long = "enable-simd")]
simd: bool,
/// Enable support for the threads proposal.
#[structopt(long = "enable-threads")]
threads: bool,
/// Enable support for all pre-standard proposals.
#[structopt(long = "enable-all")]
all: bool,
@@ -386,6 +390,9 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
if options.features.simd || options.features.all {
features.enable_simd();
}
if options.features.threads || options.features.all {
features.enable_threads();
}
wasm_binary = wabt::wat2wasm_with_features(wasm_binary, features)
.map_err(|e| format!("Can't convert from wast to wasm: {:?}", e))?;
}
@@ -417,6 +424,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
track_state,
features: Features {
simd: options.features.simd || options.features.all,
threads: options.features.threads || options.features.all,
},
},
&*compiler,
@@ -430,6 +438,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
track_state,
features: Features {
simd: options.features.simd || options.features.all,
threads: options.features.threads || options.features.all,
},
..Default::default()
},
@@ -477,6 +486,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
track_state,
features: Features {
simd: options.features.simd || options.features.all,
threads: options.features.threads || options.features.all,
},
..Default::default()
},
@@ -766,6 +776,7 @@ fn validate_wasm(validate: Validate) -> Result<(), String> {
&wasm_binary,
Features {
simd: validate.features.simd || validate.features.all,
threads: validate.features.threads || validate.features.all,
},
)
.map_err(|err| format!("Validation failed: {}", err))?;