diff --git a/lib/runtime-core/src/backing.rs b/lib/runtime-core/src/backing.rs
index 3c9a02075..4e0a07318 100644
--- a/lib/runtime-core/src/backing.rs
+++ b/lib/runtime-core/src/backing.rs
@@ -97,9 +97,7 @@ impl LocalBacking {
                 assert!(memory_desc.minimum.bytes().0 >= data_top);
 
                 let mem = &memories[local_memory_index];
-                for (mem_byte, data_byte) in mem
-                    .view(init_base..init_base + init.data.len())
-                    .unwrap()
+                for (mem_byte, data_byte) in mem.view()[init_base..init_base + init.data.len()]
                     .iter()
                     .zip(init.data.iter())
                 {
diff --git a/lib/runtime-core/src/memory/atomic.rs b/lib/runtime-core/src/memory/atomic.rs
index 7ecff48b8..f4ed3d3d0 100644
--- a/lib/runtime-core/src/memory/atomic.rs
+++ b/lib/runtime-core/src/memory/atomic.rs
@@ -29,22 +29,22 @@ pub trait IntCast:
 macro_rules! intcast {
     ($($type:ident)+) => {
         $(
-            impl IntCast for Wrapping<$type> {
+            impl IntCast for $type {
                 type Public = $type;
                 fn from(u: usize) -> Self {
-                    Wrapping(u as $type)
+                    u as $type
                 }
 
                 fn to(self) -> usize {
-                    self.0 as usize
+                    self as usize
                 }
 
                 fn new(p: $type) -> Self {
-                    Wrapping(p)
+                    p
                 }
 
                 fn unwrap(self) -> $type {
-                    self.0
+                    self
                 }
             }
         )+
diff --git a/lib/runtime-core/src/memory/mod.rs b/lib/runtime-core/src/memory/mod.rs
index 9b25a6858..40c416e82 100644
--- a/lib/runtime-core/src/memory/mod.rs
+++ b/lib/runtime-core/src/memory/mod.rs
@@ -9,14 +9,9 @@ use crate::{
     vm,
 };
 use std::{
-    cell::{Cell, Ref, RefCell, RefMut},
-    fmt,
-    marker::PhantomData,
-    mem,
-    ops::{Bound, Deref, DerefMut, Index, RangeBounds},
-    ptr,
+    cell::{Cell, RefCell},
+    fmt, mem, ptr,
     rc::Rc,
-    slice,
 };
 
 pub use self::atomic::Atomic;
@@ -35,6 +30,9 @@ enum MemoryVariant {
     Shared(SharedMemory),
 }
 
+/// A shared or unshared wasm linear memory.
+///
+/// A `Memory` represents the memory used by a wasm instance.
 #[derive(Clone)]
 pub struct Memory {
     desc: MemoryDescriptor,
@@ -98,36 +96,53 @@ impl Memory {
         }
     }
 
-    pub fn view<T: ValueType, R: RangeBounds<usize>>(&self, range: R) -> Option<MemoryView<T>> {
+    /// Return a "view" of the currently accessible memory. By
+    /// default, the view is unsynchronized, using regular memory
+    /// accesses. You can force a memory view to use atomic accesses
+    /// by calling the [`atomically`] method.
+    ///
+    /// [`atomically`]: memory/struct.MemoryView.html#method.atomically
+    ///
+    /// # Notes:
+    ///
+    /// This method is safe (as in, it won't cause the host to crash or have UB),
+    /// but it doesn't obey Rust's rules on data races when the memory is shared:
+    /// if this memory is shared between multiple threads, a single memory
+    /// location can be mutated concurrently without synchronization.
+    ///
+    /// # Usage:
+    ///
+    /// ```
+    /// # use wasmer_runtime_core::memory::{Memory, MemoryView};
+    /// # use std::sync::atomic::Ordering;
+    /// # fn view_memory(memory: Memory) {
+    /// // Without synchronization.
+    /// let view: MemoryView<u8> = memory.view();
+    /// for byte in view[0x1000 .. 0x1010].iter().map(|cell| cell.get()) {
+    ///     println!("byte: {}", byte);
+    /// }
+    ///
+    /// // With synchronization.
+    /// let atomic_view = view.atomically();
+    /// for byte in atomic_view[0x1000 .. 0x1010].iter().map(|atom| atom.load(Ordering::SeqCst)) {
+    ///     println!("byte: {}", byte);
+    /// }
+    /// # }
+    /// ```
+    pub fn view<T: ValueType>(&self) -> MemoryView<T> {
         let vm::LocalMemory {
             base,
-            bound,
+            bound: _,
             memory: _,
         } = unsafe { *self.vm_local_memory() };
 
-        let range_start = match range.start_bound() {
-            Bound::Included(start) => *start,
-            Bound::Excluded(start) => *start + 1,
-            Bound::Unbounded => 0,
-        };
+        let length = self.size().bytes().0 / mem::size_of::<T>();
 
-        let range_end = match range.end_bound() {
-            Bound::Included(end) => *end + 1,
-            Bound::Excluded(end) => *end,
-            Bound::Unbounded => bound as usize,
-        };
-
-        let length = range_end - range_start;
-
-        let size_in_bytes = mem::size_of::<T>() * length;
-
-        if range_end < range_start || range_start + size_in_bytes >= bound {
-            return None;
-        }
-
-        Some(unsafe { MemoryView::new(base as _, length as u32) })
+        unsafe { MemoryView::new(base as _, length as u32) }
     }
 
+    /// Convert this memory to a shared memory if the shared flag
+    /// is present in the description used to create it.
     pub fn shared(self) -> Option<SharedMemory> {
         if self.desc.shared {
             Some(SharedMemory { desc: self.desc })
@@ -139,7 +154,7 @@
     pub(crate) fn vm_local_memory(&self) -> *mut vm::LocalMemory {
         match &self.variant {
             MemoryVariant::Unshared(unshared_mem) => unshared_mem.vm_local_memory(),
-            MemoryVariant::Shared(shared_mem) => unimplemented!(),
+            MemoryVariant::Shared(_) => unimplemented!(),
         }
     }
 }
diff --git a/lib/runtime-core/src/memory/view.rs b/lib/runtime-core/src/memory/view.rs
index b3f1abd69..3376c6e17 100644
--- a/lib/runtime-core/src/memory/view.rs
+++ b/lib/runtime-core/src/memory/view.rs
@@ -28,8 +28,8 @@ where
     }
 }
 
-impl<'a, T> MemoryView<'a, T, NonAtomically> {
-    pub fn atomically(self) -> MemoryView<'a, T, Atomically> {
+impl<'a, T: IntCast> MemoryView<'a, T, NonAtomically> {
+    pub fn atomically(&self) -> MemoryView<'a, T, Atomically> {
         MemoryView {
             ptr: self.ptr,
             length: self.length,
diff --git a/lib/runtime-core/src/vm.rs b/lib/runtime-core/src/vm.rs
index 8afd86c6b..5d6a1925f 100644
--- a/lib/runtime-core/src/vm.rs
+++ b/lib/runtime-core/src/vm.rs
@@ -106,12 +106,11 @@ impl Ctx {
     /// ```
     /// # use wasmer_runtime_core::{
     /// #     vm::Ctx,
-    /// #     memory::Memory,
     /// # };
     /// fn read_memory(ctx: &Ctx) -> u8 {
-    ///     let first_memory: &Memory = ctx.memory(0);
+    ///     let first_memory = ctx.memory(0);
     ///     // Read the first byte of that linear memory.
-    ///     first_memory.access()[0]
+    ///     first_memory.view()[0].get()
     /// }
     /// ```
     pub fn memory(&self, mem_index: u32) -> &Memory {
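For anyone migrating callers: the sketch below contrasts the removed ranged `view(range)` with the new whole-memory `view()`. It is a minimal illustration, not part of this diff; `sum_guest_bytes` and the byte range are hypothetical, and it assumes the `wasmer_runtime_core` API exactly as it appears in the hunks above.

```rust
// A minimal migration sketch. `sum_guest_bytes` and the offsets are
// made up for illustration; only the `Memory::view` / `MemoryView`
// API comes from the diff above.
use wasmer_runtime_core::memory::{Memory, MemoryView};

fn sum_guest_bytes(memory: &Memory) -> u32 {
    // Before this diff:
    //     let view = memory.view(0x10..0x14).unwrap(); // Option<MemoryView<u8>>
    //
    // After: `view()` always succeeds and spans the whole memory; the
    // caller slices it, and an out-of-bounds range panics like any
    // slice index instead of returning None.
    let view: MemoryView<u8> = memory.view();
    view[0x10..0x14].iter().map(|cell| cell.get() as u32).sum()
}
```

The trade-off is that bounds checking moves from an explicit `Option` to ordinary slice-indexing panics, which is how the updated `backing.rs` loop and the `Ctx::memory` doc example use it as well.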