1263 lines
45 KiB
Rust
Raw Normal View History

//! The state module is used to track the state of running WebAssembly instances so that
//! state can be read or updated at runtime. Use cases include generating stack traces, switching
//! generated code from one tier to another, or serializing the state of a running instance.
2019-12-20 20:11:56 -08:00
use crate::backend::RunnableModule;
2019-06-12 13:38:58 +08:00
use std::collections::BTreeMap;
2019-06-26 12:56:37 +08:00
use std::ops::Bound::{Included, Unbounded};
2019-12-20 20:33:50 -08:00
use std::sync::Arc;
2019-06-12 13:38:58 +08:00
/// An index into the abstract register file tracked by `MachineState`.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct RegisterIndex(pub usize);
/// An abstract wasm value: either a value known only at runtime, or a
/// compile-time constant.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub enum WasmAbstractValue {
    /// A value known only at runtime (read from a captured stack/locals dump).
    Runtime,
    /// A compile-time constant value.
    Const(u64),
}
/// A container for the state of a running wasm instance.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct MachineState {
    /// Values on the machine (native) stack.
    pub stack_values: Vec<MachineValue>,
    /// Values held in machine registers, indexed by `RegisterIndex`.
    pub register_values: Vec<MachineValue>,
    /// Known entries of the previous stack frame, keyed by slot index.
    pub prev_frame: BTreeMap<usize, MachineValue>,
    /// The abstract wasm operand stack.
    pub wasm_stack: Vec<WasmAbstractValue>,
    /// Private depth of the wasm stack.
    pub wasm_stack_private_depth: usize,
    /// Offset of the current wasm instruction; `usize::MAX` denotes the
    /// function header (see `x64::new_machine_state`).
    pub wasm_inst_offset: usize,
}
/// A diff of two `MachineState`s, applied on top of a chain of parent diffs.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct MachineStateDiff {
    /// Index of the parent diff in `FunctionStateMap::diffs`; `None` means
    /// this diff applies directly to the function's initial state.
    pub last: Option<usize>,
    /// Values pushed onto the machine stack (after the pops below).
    pub stack_push: Vec<MachineValue>,
    /// Number of values popped from the machine stack.
    pub stack_pop: usize,
    /// Registers whose values changed, with their new values.
    pub reg_diff: Vec<(RegisterIndex, MachineValue)>,
    /// Previous-frame entries that changed; `None` marks a removal.
    pub prev_frame_diff: BTreeMap<usize, Option<MachineValue>>, // None for removal
    /// Values pushed onto the abstract wasm stack (after the pops below).
    pub wasm_stack_push: Vec<WasmAbstractValue>,
    /// Number of values popped from the abstract wasm stack.
    pub wasm_stack_pop: usize,
    /// Private depth of the wasm stack.
    pub wasm_stack_private_depth: usize, // absolute value; not a diff.
    /// Wasm instruction offset.
    pub wasm_inst_offset: usize, // absolute value; not a diff.
}
/// Describes what a machine stack slot or register holds.
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub enum MachineValue {
    /// Unknown / undefined contents.
    Undefined,
    /// The vmctx pointer itself.
    Vmctx,
    /// A value obtained by dereferencing from the vmctx pointer through the
    /// given chain of byte offsets (see `x64::compute_vmctx_deref`).
    VmctxDeref(Vec<usize>),
    /// A preserved copy of the given register's value.
    PreserveRegister(RegisterIndex),
    /// A copy of another stack slot, addressed relative to the base pointer.
    CopyStackBPRelative(i32), // relative to Base Pointer, in byte offset
    /// Explicit shadow region.
    ExplicitShadow, // indicates that all values above this are above the shadow region
    /// The wasm operand-stack slot at the given index.
    WasmStack(usize),
    /// The wasm local at the given index.
    WasmLocal(usize),
    /// Two 32-bit halves packed into a single 64-bit slot.
    TwoHalves(Box<(MachineValue, MachineValue)>), // 32-bit values. TODO: optimize: add another type for inner "half" value to avoid boxing?
}
/// Per-function machine-state information recorded at code offsets.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FunctionStateMap {
    /// Machine state at function entry.
    pub initial: MachineState,
    /// Index of this function among the module's local functions.
    pub local_function_id: usize,
    /// Abstract values of the function's locals.
    pub locals: Vec<WasmAbstractValue>,
    /// Size of the shadow region on the stack.
    pub shadow_size: usize, // for single-pass backend, 32 bytes on x86-64
    /// Machine-state diffs; entries chain to parents via `MachineStateDiff::last`.
    pub diffs: Vec<MachineStateDiff>,
    /// Suspend offset for the function header; used when the wasm instruction
    /// offset is unknown (`usize::MAX`).
    pub wasm_function_header_target_offset: Option<SuspendOffset>,
    /// Maps a wasm instruction offset to its suspend offset in generated code.
    pub wasm_offset_to_target_offset: BTreeMap<usize, SuspendOffset>,
    /// Loop suspend points.
    pub loop_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
    /// Call suspend points.
    pub call_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
    /// Trappable suspend points.
    pub trappable_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
}
/// A code offset at which execution may be suspended, tagged by the table
/// (`loop_offsets` / `call_offsets` / `trappable_offsets`) it keys into.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum SuspendOffset {
    /// A loop suspend point; offset keys into `FunctionStateMap::loop_offsets`.
    Loop(usize),
    /// A call suspend point; offset keys into `FunctionStateMap::call_offsets`.
    Call(usize),
    /// A trappable suspend point; offset keys into `FunctionStateMap::trappable_offsets`.
    Trappable(usize),
}
/// Information attached to a recorded suspend offset.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct OffsetInfo {
    /// Exclusive end of the code range this entry covers.
    pub end_offset: usize, // excluded bound
    /// Index into `FunctionStateMap::diffs` describing the machine state here.
    pub diff_id: usize,
    /// Code offset (relative to the code base) at which execution resumes;
    /// used as the return address when rebuilding a stack.
    pub activate_offset: usize,
}
/// State maps for all local functions of a module.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ModuleStateMap {
    /// Function state maps, keyed by each function's start offset relative to
    /// the code base (range-searched in `lookup_ip`).
    pub local_functions: BTreeMap<usize, FunctionStateMap>,
    /// Total size of the module's generated code region.
    pub total_size: usize,
}
/// State dump of a single wasm stack frame.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct WasmFunctionStateDump {
    /// Index of the function among the module's local functions.
    pub local_function_id: usize,
    /// Wasm instruction offset; `usize::MAX` denotes the function header.
    pub wasm_inst_offset: usize,
    /// Captured wasm stack values; `None` when a value is unknown.
    pub stack: Vec<Option<u64>>,
    /// Captured wasm local values; `None` when a value is unknown.
    pub locals: Vec<Option<u64>>,
}
/// An image of the execution state: the captured call stack, bottom to top.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecutionStateImage {
    /// Captured wasm stack frames.
    pub frames: Vec<WasmFunctionStateDump>,
}
/// Represents an image of an `Instance` including its memory, globals, and execution state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InstanceImage {
    /// Copy of the instance's linear memory; `None` when the instance has no memory.
    pub memory: Option<Vec<u8>>,
    /// Stored values of the instance's local globals.
    pub globals: Vec<u128>,
    /// Captured execution state (stack frames) for this `InstanceImage`.
    pub execution_state: ExecutionStateImage,
}
/// A `CodeVersion` is a container for a unit of generated code for a module.
#[derive(Clone)]
pub struct CodeVersion {
    /// Indicates if this code version is the baseline version.
    pub baseline: bool,
    /// `ModuleStateMap` for this code version.
    pub msm: ModuleStateMap,
    /// Start address of the machine code for this module, as an integer.
    pub base: usize,
    /// Name of the backend used to compile this module.
    pub backend: String,
    /// `RunnableModule` for this code version.
    pub runnable_module: Arc<Box<dyn RunnableModule>>,
}
impl ModuleStateMap {
/// Looks up an ip from self using the given ip, base, and offset table provider.
2019-08-14 17:14:01 -07:00
pub fn lookup_ip<F: FnOnce(&FunctionStateMap) -> &BTreeMap<usize, OffsetInfo>>(
2019-07-30 22:25:58 +08:00
&self,
ip: usize,
base: usize,
offset_table_provider: F,
) -> Option<(&FunctionStateMap, MachineState)> {
if ip < base || ip - base >= self.total_size {
None
} else {
let (_, fsm) = self
.local_functions
.range((Unbounded, Included(&(ip - base))))
.last()
.unwrap();
2019-06-12 22:02:15 +08:00
2019-07-30 22:25:58 +08:00
match offset_table_provider(fsm)
.range((Unbounded, Included(&(ip - base))))
.last()
{
Some((_, x)) => {
if ip - base >= x.end_offset {
None
} else if x.diff_id < fsm.diffs.len() {
2019-07-13 00:17:33 +08:00
Some((fsm, fsm.diffs[x.diff_id].build_state(fsm)))
} else {
None
}
}
None => None,
2019-06-12 22:02:15 +08:00
}
}
}
/// Looks up a call ip from self using the given ip and base values.
2019-08-14 17:16:30 -07:00
pub fn lookup_call_ip(
&self,
ip: usize,
base: usize,
) -> Option<(&FunctionStateMap, MachineState)> {
2019-07-30 22:25:58 +08:00
self.lookup_ip(ip, base, |fsm| &fsm.call_offsets)
}
2019-06-12 22:02:15 +08:00
/// Looks up a trappable ip from self using the given ip and base values.
2019-08-14 17:14:01 -07:00
pub fn lookup_trappable_ip(
2019-06-12 22:02:15 +08:00
&self,
ip: usize,
base: usize,
) -> Option<(&FunctionStateMap, MachineState)> {
2019-07-30 22:25:58 +08:00
self.lookup_ip(ip, base, |fsm| &fsm.trappable_offsets)
2019-06-27 15:49:43 +08:00
}
/// Looks up a loop ip from self using the given ip and base values.
2019-08-14 17:16:30 -07:00
pub fn lookup_loop_ip(
&self,
ip: usize,
base: usize,
) -> Option<(&FunctionStateMap, MachineState)> {
2019-07-30 22:25:58 +08:00
self.lookup_ip(ip, base, |fsm| &fsm.loop_offsets)
2019-06-12 13:38:58 +08:00
}
2019-06-11 20:49:30 +08:00
}
impl FunctionStateMap {
/// Creates a new `FunctionStateMap` with the given parameters.
2019-06-25 03:56:20 +08:00
pub fn new(
initial: MachineState,
local_function_id: usize,
shadow_size: usize,
locals: Vec<WasmAbstractValue>,
) -> FunctionStateMap {
2019-06-11 20:49:30 +08:00
FunctionStateMap {
initial,
local_function_id,
shadow_size,
locals,
2019-06-11 20:49:30 +08:00
diffs: vec![],
2019-06-27 15:49:43 +08:00
wasm_function_header_target_offset: None,
2019-06-27 17:54:06 +08:00
wasm_offset_to_target_offset: BTreeMap::new(),
2019-06-12 13:38:58 +08:00
loop_offsets: BTreeMap::new(),
call_offsets: BTreeMap::new(),
trappable_offsets: BTreeMap::new(),
2019-06-11 20:49:30 +08:00
}
}
}
impl MachineState {
    /// Computes a `MachineStateDiff` that transforms `old` into `self`.
    ///
    /// Stacks are diffed as a pop count (entries of `old` past the first
    /// divergence point) plus a push list (entries of `self` past it);
    /// registers and the previous frame are diffed entry-wise. The private
    /// wasm stack depth and instruction offset are stored as absolute values.
    /// The resulting diff has `last: None`; the caller links it into a chain.
    pub fn diff(&self, old: &MachineState) -> MachineStateDiff {
        // Index of the first stack slot where the two states disagree, or
        // the length of the shorter stack if one is a prefix of the other.
        let first_diff_stack_depth: usize = self
            .stack_values
            .iter()
            .zip(old.stack_values.iter())
            .enumerate()
            .find(|&(_, (a, b))| a != b)
            .map(|x| x.0)
            .unwrap_or(old.stack_values.len().min(self.stack_values.len()));

        // Register files are fixed-size; only changed entries are recorded.
        assert_eq!(self.register_values.len(), old.register_values.len());
        let reg_diff: Vec<_> = self
            .register_values
            .iter()
            .zip(old.register_values.iter())
            .enumerate()
            .filter(|&(_, (a, b))| a != b)
            .map(|(i, (a, _))| (RegisterIndex(i), a.clone()))
            .collect();

        // Entries new or changed in `self` map to `Some(value)`; entries
        // present only in `old` map to `None` (removal marker).
        let prev_frame_diff: BTreeMap<usize, Option<MachineValue>> = self
            .prev_frame
            .iter()
            .filter(|(k, v)| {
                if let Some(ref old_v) = old.prev_frame.get(k) {
                    v != old_v
                } else {
                    true
                }
            })
            .map(|(&k, v)| (k, Some(v.clone())))
            .chain(
                old.prev_frame
                    .iter()
                    .filter(|(k, _)| self.prev_frame.get(k).is_none())
                    .map(|(&k, _)| (k, None)),
            )
            .collect();

        // Same prefix-divergence computation for the abstract wasm stack.
        let first_diff_wasm_stack_depth: usize = self
            .wasm_stack
            .iter()
            .zip(old.wasm_stack.iter())
            .enumerate()
            .find(|&(_, (a, b))| a != b)
            .map(|x| x.0)
            .unwrap_or(old.wasm_stack.len().min(self.wasm_stack.len()));

        MachineStateDiff {
            last: None,
            stack_push: self.stack_values[first_diff_stack_depth..].to_vec(),
            stack_pop: old.stack_values.len() - first_diff_stack_depth,
            reg_diff,
            prev_frame_diff,
            wasm_stack_push: self.wasm_stack[first_diff_wasm_stack_depth..].to_vec(),
            wasm_stack_pop: old.wasm_stack.len() - first_diff_wasm_stack_depth,
            wasm_stack_private_depth: self.wasm_stack_private_depth,
            wasm_inst_offset: self.wasm_inst_offset,
        }
    }
}
impl MachineStateDiff {
    /// Reconstructs the full `MachineState` this diff represents by replaying
    /// the chain of diffs (rooted at `m.initial`) from oldest to newest.
    pub fn build_state(&self, m: &FunctionStateMap) -> MachineState {
        // Walk the `last` links back to the root, collecting the chain.
        let mut chain: Vec<&MachineStateDiff> = vec![];
        chain.push(self);
        let mut current = self.last;
        while let Some(x) = current {
            let that = &m.diffs[x];
            current = that.last;
            chain.push(that);
        }
        // Oldest first, so replay starts from the initial state.
        chain.reverse();
        let mut state = m.initial.clone();
        for x in chain {
            // Pops happen before pushes, mirroring how the diff was taken.
            for _ in 0..x.stack_pop {
                state.stack_values.pop().unwrap();
            }
            for v in &x.stack_push {
                state.stack_values.push(v.clone());
            }
            for &(index, ref v) in &x.reg_diff {
                state.register_values[index.0] = v.clone();
            }
            for (index, ref v) in &x.prev_frame_diff {
                if let Some(ref x) = v {
                    state.prev_frame.insert(*index, x.clone());
                } else {
                    // `None` marks a removal; the entry must exist.
                    state.prev_frame.remove(index).unwrap();
                }
            }
            for _ in 0..x.wasm_stack_pop {
                state.wasm_stack.pop().unwrap();
            }
            for v in &x.wasm_stack_push {
                state.wasm_stack.push(*v);
            }
        }
        // These two fields are stored as absolute values, not diffs.
        state.wasm_stack_private_depth = self.wasm_stack_private_depth;
        state.wasm_inst_offset = self.wasm_inst_offset;
        state
    }
}
2019-06-26 01:38:39 +08:00
impl ExecutionStateImage {
/// Prints a backtrace if the `WASMER_BACKTRACE` environment variable is 1.
2019-06-26 11:28:46 +08:00
pub fn print_backtrace_if_needed(&self) {
use std::env;
if let Ok(x) = env::var("WASMER_BACKTRACE") {
if x == "1" {
eprintln!("{}", self.output());
2019-06-26 11:28:46 +08:00
return;
}
}
eprintln!("Run with `WASMER_BACKTRACE=1` environment variable to display a backtrace.");
}
/// Converts self into a `String`, used for display purposes.
pub fn output(&self) -> String {
2019-06-26 11:28:46 +08:00
fn join_strings(x: impl Iterator<Item = String>, sep: &str) -> String {
let mut ret = String::new();
let mut first = true;
for s in x {
if first {
first = false;
} else {
ret += sep;
}
ret += &s;
}
ret
}
fn format_optional_u64_sequence(x: &[Option<u64>]) -> String {
if x.len() == 0 {
"(empty)".into()
} else {
join_strings(
x.iter().enumerate().map(|(i, x)| {
format!(
"[{}] = {}",
i,
x.map(|x| format!("{}", x))
.unwrap_or_else(|| "?".to_string())
)
}),
", ",
)
}
}
let mut ret = String::new();
if self.frames.len() == 0 {
ret += &"Unknown fault address, cannot read stack.";
2019-06-26 11:28:46 +08:00
ret += "\n";
} else {
ret += &"Backtrace:";
2019-06-26 11:28:46 +08:00
ret += "\n";
for (i, f) in self.frames.iter().enumerate() {
ret += &format!("* Frame {} @ Local function {}", i, f.local_function_id);
2019-06-26 11:28:46 +08:00
ret += "\n";
2019-09-15 03:31:05 +02:00
ret += &format!(" {} {}\n", "Offset:", format!("{}", f.wasm_inst_offset),);
2019-06-26 11:28:46 +08:00
ret += &format!(
" {} {}\n",
"Locals:",
2019-06-26 11:28:46 +08:00
format_optional_u64_sequence(&f.locals)
);
ret += &format!(
" {} {}\n\n",
"Stack:",
2019-06-26 11:28:46 +08:00
format_optional_u64_sequence(&f.stack)
);
}
}
ret
}
2019-06-25 20:01:56 +08:00
}
impl InstanceImage {
    /// Deserializes an `InstanceImage` from `bincode`-encoded bytes,
    /// yielding `None` when the input cannot be decoded.
    pub fn from_bytes(input: &[u8]) -> Option<InstanceImage> {
        use bincode::deserialize;
        deserialize(input).ok()
    }

    /// Serializes this image into a `bincode`-encoded byte vector.
    pub fn to_bytes(&self) -> Vec<u8> {
        use bincode::serialize;
        serialize(self).unwrap()
    }
}
2019-11-13 14:52:22 +08:00
/// Declarations for x86-64 registers.
#[cfg(unix)]
pub mod x64_decl {
    use super::*;

    /// General-purpose registers.
    #[repr(u8)]
    #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
    pub enum GPR {
        /// RAX register
        RAX,
        /// RCX register
        RCX,
        /// RDX register
        RDX,
        /// RBX register
        RBX,
        /// RSP register
        RSP,
        /// RBP register
        RBP,
        /// RSI register
        RSI,
        /// RDI register
        RDI,
        /// R8 register
        R8,
        /// R9 register
        R9,
        /// R10 register
        R10,
        /// R11 register
        R11,
        /// R12 register
        R12,
        /// R13 register
        R13,
        /// R14 register
        R14,
        /// R15 register
        R15,
    }

    /// XMM registers.
    #[repr(u8)]
    #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
    pub enum XMM {
        /// XMM register 0
        XMM0,
        /// XMM register 1
        XMM1,
        /// XMM register 2
        XMM2,
        /// XMM register 3
        XMM3,
        /// XMM register 4
        XMM4,
        /// XMM register 5
        XMM5,
        /// XMM register 6
        XMM6,
        /// XMM register 7
        XMM7,
        /// XMM register 8
        XMM8,
        /// XMM register 9
        XMM9,
        /// XMM register 10
        XMM10,
        /// XMM register 11
        XMM11,
        /// XMM register 12
        XMM12,
        /// XMM register 13
        XMM13,
        /// XMM register 14
        XMM14,
        /// XMM register 15
        XMM15,
    }

    /// A machine register under the x86-64 architecture.
    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
    pub enum X64Register {
        /// General-purpose registers.
        GPR(GPR),
        /// XMM (floating point/SIMD) registers.
        XMM(XMM),
    }

    impl X64Register {
        /// Returns the index of the register in the unified register file:
        /// GPRs map to indices 0..16 and XMM registers to 16..32.
        pub fn to_index(&self) -> RegisterIndex {
            match *self {
                X64Register::GPR(x) => RegisterIndex(x as usize),
                X64Register::XMM(x) => RegisterIndex(x as usize + 16),
            }
        }

        /// Converts a DWARF register number to an `X64Register`.
        ///
        /// The mapping follows the System V AMD64 ABI DWARF register
        /// numbering (0 = RAX, 1 = RDX, ..., XMM registers start at 17);
        /// numbers without a mapping here return `None`.
        pub fn from_dwarf_regnum(x: u16) -> Option<X64Register> {
            Some(match x {
                0 => X64Register::GPR(GPR::RAX),
                1 => X64Register::GPR(GPR::RDX),
                2 => X64Register::GPR(GPR::RCX),
                3 => X64Register::GPR(GPR::RBX),
                4 => X64Register::GPR(GPR::RSI),
                5 => X64Register::GPR(GPR::RDI),
                6 => X64Register::GPR(GPR::RBP),
                7 => X64Register::GPR(GPR::RSP),
                8 => X64Register::GPR(GPR::R8),
                9 => X64Register::GPR(GPR::R9),
                10 => X64Register::GPR(GPR::R10),
                11 => X64Register::GPR(GPR::R11),
                12 => X64Register::GPR(GPR::R12),
                13 => X64Register::GPR(GPR::R13),
                14 => X64Register::GPR(GPR::R14),
                15 => X64Register::GPR(GPR::R15),
                17 => X64Register::XMM(XMM::XMM0),
                18 => X64Register::XMM(XMM::XMM1),
                19 => X64Register::XMM(XMM::XMM2),
                20 => X64Register::XMM(XMM::XMM3),
                21 => X64Register::XMM(XMM::XMM4),
                22 => X64Register::XMM(XMM::XMM5),
                23 => X64Register::XMM(XMM::XMM6),
                24 => X64Register::XMM(XMM::XMM7),
                _ => return None,
            })
        }
    }
}
2019-11-22 00:36:34 +08:00
#[cfg(unix)]
2019-06-09 21:21:18 +08:00
pub mod x64 {
//! The x64 state module contains functions to generate state and code for x64 targets.
pub use super::x64_decl::*;
2019-06-09 21:21:18 +08:00
use super::*;
2019-07-04 01:45:06 +08:00
use crate::codegen::BreakpointMap;
2019-08-15 19:13:00 -07:00
use crate::fault::{
catch_unsafe_unwind, get_boundary_register_preservation, run_on_alternative_stack,
};
2019-06-26 01:38:39 +08:00
use crate::structures::TypedIndex;
2019-06-26 01:39:30 +08:00
use crate::types::LocalGlobalIndex;
use crate::vm::Ctx;
2019-06-27 01:04:59 +08:00
use std::any::Any;
2019-06-09 21:21:18 +08:00
2019-08-01 23:28:39 +08:00
/// Computes an address by following a chain of pointer dereferences.
///
/// The walk starts at the address of the local `vmctx` pointer, so the
/// first step dereferences to `vmctx` itself before applying the first
/// offset; each element of `seq` is a byte offset applied after a
/// dereference.
///
/// # Safety
/// Every pointer along the chain must be valid and dereferenceable.
unsafe fn compute_vmctx_deref(vmctx: *const Ctx, seq: &[usize]) -> u64 {
    let mut ptr = &vmctx as *const *const Ctx as *const u8;
    for x in seq {
        ptr = (*(ptr as *const *const u8)).offset(*x as isize);
    }
    ptr as usize as u64
}
/// Create a new `MachineState` with default values.
2019-06-11 20:49:30 +08:00
pub fn new_machine_state() -> MachineState {
MachineState {
stack_values: vec![],
register_values: vec![MachineValue::Undefined; 16 + 8],
prev_frame: BTreeMap::new(),
wasm_stack: vec![],
wasm_stack_private_depth: 0,
2019-06-25 20:01:56 +08:00
wasm_inst_offset: ::std::usize::MAX,
2019-06-11 20:49:30 +08:00
}
}
/// Invokes a call return on the stack for the given module state map, code base, instance
/// image and context.
///
/// Rebuilds a native call stack from `image.execution_state` (bottom frame
/// first), restores linear memory and globals from `image` into `vmctx`,
/// then transfers control into the forged stack via
/// `run_on_alternative_stack`, catching unwinds with `catch_unsafe_unwind`.
///
/// # Safety
/// `msm` and `code_base` must describe the currently loaded machine code,
/// and `image` must have been captured from a compatible instance — the
/// reconstructed stack is executed directly.
#[warn(unused_variables)]
pub unsafe fn invoke_call_return_on_stack(
    msm: &ModuleStateMap,
    code_base: usize,
    image: InstanceImage,
    vmctx: &mut Ctx,
    breakpoints: Option<BreakpointMap>,
) -> Result<u64, Box<dyn Any + Send>> {
    let mut stack: Vec<u64> = vec![0; 1048576 * 8 / 8]; // 8MB stack
    // The stack grows downward: start at the top and decrement.
    let mut stack_offset: usize = stack.len();
    stack_offset -= 3; // placeholder for call return
    let mut last_stack_offset: u64 = 0; // rbp
    let mut known_registers: [Option<u64>; 32] = [None; 32];
    let local_functions_vec: Vec<&FunctionStateMap> =
        msm.local_functions.iter().map(|(_, v)| v).collect();
    // Bottom to top
    for f in image.execution_state.frames.iter().rev() {
        let fsm = local_functions_vec[f.local_function_id];
        // usize::MAX marks "at function header"; otherwise look up the
        // suspend offset recorded for this wasm instruction.
        let suspend_offset = if f.wasm_inst_offset == ::std::usize::MAX {
            fsm.wasm_function_header_target_offset
        } else {
            fsm.wasm_offset_to_target_offset
                .get(&f.wasm_inst_offset)
                .map(|x| *x)
        }
        .expect("instruction is not a critical point");
        // Resolve the resume address and the diff describing the machine
        // state at the suspend point.
        let (activate_offset, diff_id) = match suspend_offset {
            SuspendOffset::Loop(x) => fsm.loop_offsets.get(&x),
            SuspendOffset::Call(x) => fsm.call_offsets.get(&x),
            SuspendOffset::Trappable(x) => fsm.trappable_offsets.get(&x),
        }
        .map(|x| (x.activate_offset, x.diff_id))
        .expect("offset cannot be found in table");
        let diff = &fsm.diffs[diff_id];
        let state = diff.build_state(fsm);
        stack_offset -= 1;
        stack[stack_offset] = stack.as_ptr().offset(last_stack_offset as isize) as usize as u64; // push rbp
        last_stack_offset = stack_offset as _;
        let mut got_explicit_shadow = false;
        // Materialize each abstract stack value into a concrete u64 slot.
        for v in state.stack_values.iter() {
            match *v {
                MachineValue::Undefined => stack_offset -= 1,
                MachineValue::Vmctx => {
                    stack_offset -= 1;
                    stack[stack_offset] = vmctx as *mut Ctx as usize as u64;
                }
                MachineValue::VmctxDeref(ref seq) => {
                    stack_offset -= 1;
                    stack[stack_offset] = compute_vmctx_deref(vmctx as *const Ctx, seq);
                }
                MachineValue::PreserveRegister(index) => {
                    stack_offset -= 1;
                    stack[stack_offset] = known_registers[index.0].unwrap_or(0);
                }
                MachineValue::CopyStackBPRelative(byte_offset) => {
                    assert!(byte_offset % 8 == 0);
                    let target_offset = (byte_offset / 8) as isize;
                    let v = stack[(last_stack_offset as isize + target_offset) as usize];
                    stack_offset -= 1;
                    stack[stack_offset] = v;
                }
                MachineValue::ExplicitShadow => {
                    assert!(fsm.shadow_size % 8 == 0);
                    stack_offset -= fsm.shadow_size / 8;
                    got_explicit_shadow = true;
                }
                MachineValue::WasmStack(x) => {
                    stack_offset -= 1;
                    match state.wasm_stack[x] {
                        WasmAbstractValue::Const(x) => {
                            stack[stack_offset] = x;
                        }
                        WasmAbstractValue::Runtime => {
                            stack[stack_offset] = f.stack[x].unwrap();
                        }
                    }
                }
                MachineValue::WasmLocal(x) => {
                    stack_offset -= 1;
                    match fsm.locals[x] {
                        WasmAbstractValue::Const(x) => {
                            stack[stack_offset] = x;
                        }
                        WasmAbstractValue::Runtime => {
                            stack[stack_offset] = f.locals[x].unwrap();
                        }
                    }
                }
                // Two 32-bit values packed into one 64-bit slot: the low
                // half is OR'd in directly, the high half shifted by 32.
                MachineValue::TwoHalves(ref inner) => {
                    stack_offset -= 1;
                    // TODO: Cleanup
                    match inner.0 {
                        MachineValue::WasmStack(x) => match state.wasm_stack[x] {
                            WasmAbstractValue::Const(x) => {
                                assert!(x <= std::u32::MAX as u64);
                                stack[stack_offset] |= x;
                            }
                            WasmAbstractValue::Runtime => {
                                let v = f.stack[x].unwrap();
                                assert!(v <= std::u32::MAX as u64);
                                stack[stack_offset] |= v;
                            }
                        },
                        MachineValue::WasmLocal(x) => match fsm.locals[x] {
                            WasmAbstractValue::Const(x) => {
                                assert!(x <= std::u32::MAX as u64);
                                stack[stack_offset] |= x;
                            }
                            WasmAbstractValue::Runtime => {
                                let v = f.locals[x].unwrap();
                                assert!(v <= std::u32::MAX as u64);
                                stack[stack_offset] |= v;
                            }
                        },
                        MachineValue::VmctxDeref(ref seq) => {
                            stack[stack_offset] |=
                                compute_vmctx_deref(vmctx as *const Ctx, seq)
                                    & (std::u32::MAX as u64);
                        }
                        MachineValue::Undefined => {}
                        _ => unimplemented!("TwoHalves.0"),
                    }
                    match inner.1 {
                        MachineValue::WasmStack(x) => match state.wasm_stack[x] {
                            WasmAbstractValue::Const(x) => {
                                assert!(x <= std::u32::MAX as u64);
                                stack[stack_offset] |= x << 32;
                            }
                            WasmAbstractValue::Runtime => {
                                let v = f.stack[x].unwrap();
                                assert!(v <= std::u32::MAX as u64);
                                stack[stack_offset] |= v << 32;
                            }
                        },
                        MachineValue::WasmLocal(x) => match fsm.locals[x] {
                            WasmAbstractValue::Const(x) => {
                                assert!(x <= std::u32::MAX as u64);
                                stack[stack_offset] |= x << 32;
                            }
                            WasmAbstractValue::Runtime => {
                                let v = f.locals[x].unwrap();
                                assert!(v <= std::u32::MAX as u64);
                                stack[stack_offset] |= v << 32;
                            }
                        },
                        MachineValue::VmctxDeref(ref seq) => {
                            stack[stack_offset] |=
                                (compute_vmctx_deref(vmctx as *const Ctx, seq)
                                    & (std::u32::MAX as u64))
                                    << 32;
                        }
                        MachineValue::Undefined => {}
                        _ => unimplemented!("TwoHalves.1"),
                    }
                }
            }
        }
        // If no explicit shadow slot was recorded, still reserve the
        // shadow region expected by the generated code.
        if !got_explicit_shadow {
            assert!(fsm.shadow_size % 8 == 0);
            stack_offset -= fsm.shadow_size / 8;
        }
        // Record register contents for this frame so deeper frames can
        // push preserved copies of them.
        for (i, v) in state.register_values.iter().enumerate() {
            match *v {
                MachineValue::Undefined => {}
                MachineValue::Vmctx => {
                    known_registers[i] = Some(vmctx as *mut Ctx as usize as u64);
                }
                MachineValue::VmctxDeref(ref seq) => {
                    known_registers[i] = Some(compute_vmctx_deref(vmctx as *const Ctx, seq));
                }
                MachineValue::WasmStack(x) => match state.wasm_stack[x] {
                    WasmAbstractValue::Const(x) => {
                        known_registers[i] = Some(x);
                    }
                    WasmAbstractValue::Runtime => {
                        known_registers[i] = Some(f.stack[x].unwrap());
                    }
                },
                MachineValue::WasmLocal(x) => match fsm.locals[x] {
                    WasmAbstractValue::Const(x) => {
                        known_registers[i] = Some(x);
                    }
                    WasmAbstractValue::Runtime => {
                        known_registers[i] = Some(f.locals[x].unwrap());
                    }
                },
                _ => unreachable!(),
            }
        }
        // no need to check 16-byte alignment here because it's possible that we're not at a call entry.
        stack_offset -= 1;
        stack[stack_offset] = (code_base + activate_offset) as u64; // return address
    }
    // Push the register state for the innermost frame.
    // NOTE(review): this push order presumably mirrors the register-pop
    // sequence in `run_on_alternative_stack` — confirm before reordering.
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R15).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R14).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R13).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R12).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R11).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R10).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R9).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::R8).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RSI).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RDI).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RDX).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RCX).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RBX).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = known_registers[X64Register::GPR(GPR::RAX).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] = stack.as_ptr().offset(last_stack_offset as isize) as usize as u64; // rbp
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM15).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM14).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM13).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM12).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM11).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM10).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM9).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM8).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM7).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM6).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM5).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM4).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM3).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM2).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM1).to_index().0].unwrap_or(0);
    stack_offset -= 1;
    stack[stack_offset] =
        known_registers[X64Register::XMM(XMM::XMM0).to_index().0].unwrap_or(0);
    // Restore linear memory from the image, growing it first if the
    // captured image is larger than the current memory.
    if let Some(ref memory) = image.memory {
        assert!(vmctx.internal.memory_bound <= memory.len());
        if vmctx.internal.memory_bound < memory.len() {
            let grow: unsafe extern "C" fn(ctx: &mut Ctx, memory_index: usize, delta: usize) =
                ::std::mem::transmute((*vmctx.internal.intrinsics).memory_grow);
            grow(
                vmctx,
                0,
                // Delta is in 64 KiB wasm pages.
                (memory.len() - vmctx.internal.memory_bound) / 65536,
            );
            assert_eq!(vmctx.internal.memory_bound, memory.len());
        }
        std::slice::from_raw_parts_mut(vmctx.internal.memory_base, vmctx.internal.memory_bound)
            .copy_from_slice(memory);
    }
    // Restore local globals from the image.
    let globals_len = (*vmctx.module).info.globals.len();
    for i in 0..globals_len {
        (*(*vmctx.local_backing).globals[LocalGlobalIndex::new(i)].vm_local_global()).data =
            image.globals[i];
    }
    drop(image); // free up host memory
    catch_unsafe_unwind(
        || {
            run_on_alternative_stack(
                stack.as_mut_ptr().offset(stack.len() as isize),
                stack.as_mut_ptr().offset(stack_offset as isize),
            )
        },
        breakpoints,
    )
}
/// Builds an `InstanceImage` for the given `Ctx` and `ExecutionStateImage`.
2019-06-26 01:38:39 +08:00
pub fn build_instance_image(
vmctx: &mut Ctx,
execution_state: ExecutionStateImage,
) -> InstanceImage {
unsafe {
let memory = if vmctx.internal.memory_base.is_null() {
None
} else {
2019-06-26 01:39:30 +08:00
Some(
2019-08-21 14:53:33 -07:00
std::slice::from_raw_parts(
2019-06-26 01:39:30 +08:00
vmctx.internal.memory_base,
vmctx.internal.memory_bound,
)
.to_vec(),
)
2019-06-26 01:38:39 +08:00
};
// FIXME: Imported globals
let globals_len = (*vmctx.module).info.globals.len();
let globals: Vec<u128> = (0..globals_len)
2019-06-26 01:39:30 +08:00
.map(|i| {
(*vmctx.local_backing).globals[LocalGlobalIndex::new(i)]
.get()
.to_u128()
2019-06-26 01:39:30 +08:00
})
.collect();
2019-06-26 01:38:39 +08:00
InstanceImage {
memory: memory,
globals: globals,
execution_state: execution_state,
}
}
}
/// Returns a `ExecutionStateImage` for the given versions, stack, initial registers and
/// initial address.
///
/// Walks native stack frames starting at `stack`, resolving each return address
/// against the provided code `versions` to recover the abstract wasm state
/// (stack slots and locals) of every frame, up to `max_depth` frames.
#[warn(unused_variables)]
pub unsafe fn read_stack<'a, I: Iterator<Item = &'a CodeVersion>, F: Fn() -> I + 'a>(
    versions: F,
    mut stack: *const u64,
    initially_known_registers: [Option<u64>; 32],
    mut initial_address: Option<u64>,
    max_depth: Option<usize>,
) -> ExecutionStateImage {
    let mut known_registers: [Option<u64>; 32] = initially_known_registers;
    let mut results: Vec<WasmFunctionStateDump> = vec![];
    let mut was_baseline = true;

    for depth in 0.. {
        // Stop early once the caller-requested frame limit is reached.
        if let Some(max_depth) = max_depth {
            if depth >= max_depth {
                return ExecutionStateImage { frames: results };
            }
        }

        // For the first frame the return address may be supplied explicitly
        // (`initial_address`); for subsequent frames it is popped off the stack.
        let ret_addr = initial_address.take().unwrap_or_else(|| {
            let x = *stack;
            stack = stack.offset(1);
            x
        });

        let mut fsm_state: Option<(&FunctionStateMap, MachineState)> = None;
        let mut is_baseline: Option<bool> = None;

        // Find the code version that owns `ret_addr`, probing its machine state
        // map first as a call site, then as a trappable instruction, then as a
        // loop back-edge.
        for version in versions() {
            match version
                .msm
                .lookup_call_ip(ret_addr as usize, version.base)
                .or_else(|| {
                    version
                        .msm
                        .lookup_trappable_ip(ret_addr as usize, version.base)
                })
                .or_else(|| version.msm.lookup_loop_ip(ret_addr as usize, version.base))
            {
                Some(x) => {
                    fsm_state = Some(x);
                    is_baseline = Some(version.baseline);
                    break;
                }
                None => {}
            };
        }

        // No version recognizes this address: we have walked past the last
        // wasm frame, so the image is complete.
        let (fsm, state) = if let Some(x) = fsm_state {
            x
        } else {
            return ExecutionStateImage { frames: results };
        };

        {
            let is_baseline = is_baseline.unwrap();

            // Are we unwinding through an optimized/baseline boundary?
            // If so, recover the callee-saved GPRs preserved at that boundary.
            if is_baseline && !was_baseline {
                let callee_saved = &*get_boundary_register_preservation();
                known_registers[X64Register::GPR(GPR::R15).to_index().0] =
                    Some(callee_saved.r15);
                known_registers[X64Register::GPR(GPR::R14).to_index().0] =
                    Some(callee_saved.r14);
                known_registers[X64Register::GPR(GPR::R13).to_index().0] =
                    Some(callee_saved.r13);
                known_registers[X64Register::GPR(GPR::R12).to_index().0] =
                    Some(callee_saved.r12);
                known_registers[X64Register::GPR(GPR::RBX).to_index().0] =
                    Some(callee_saved.rbx);
            }
            was_baseline = is_baseline;
        }

        // Seed the wasm stack/locals with statically-known constants; `None`
        // marks runtime values to be filled in from registers and memory below.
        let mut wasm_stack: Vec<Option<u64>> = state
            .wasm_stack
            .iter()
            .map(|x| match *x {
                WasmAbstractValue::Const(x) => Some(x),
                WasmAbstractValue::Runtime => None,
            })
            .collect();
        let mut wasm_locals: Vec<Option<u64>> = fsm
            .locals
            .iter()
            .map(|x| match *x {
                WasmAbstractValue::Const(x) => Some(x),
                WasmAbstractValue::Runtime => None,
            })
            .collect();

        // Recover wasm values currently held in registers.
        // This must be before the next loop because that modifies `known_registers`.
        for (i, v) in state.register_values.iter().enumerate() {
            match *v {
                MachineValue::Undefined => {}
                MachineValue::Vmctx => {}
                MachineValue::VmctxDeref(_) => {}
                MachineValue::WasmStack(idx) => {
                    if let Some(v) = known_registers[i] {
                        wasm_stack[idx] = Some(v);
                    } else {
                        eprintln!(
                            "BUG: Register {} for WebAssembly stack slot {} has unknown value.",
                            i, idx
                        );
                    }
                }
                MachineValue::WasmLocal(idx) => {
                    if let Some(v) = known_registers[i] {
                        wasm_locals[idx] = Some(v);
                    }
                }
                _ => unreachable!(),
            }
        }

        // If the frame recorded no explicit shadow slot, the shadow space was
        // allocated implicitly — skip over it (shadow_size bytes, 8 per slot).
        let mut found_shadow = false;
        for v in state.stack_values.iter() {
            match *v {
                MachineValue::ExplicitShadow => {
                    found_shadow = true;
                    break;
                }
                _ => {}
            }
        }
        if !found_shadow {
            stack = stack.offset((fsm.shadow_size / 8) as isize);
        }

        // Walk this frame's stack slots from top of stack upward, recovering
        // wasm values and callee-preserved registers as we go.
        for v in state.stack_values.iter().rev() {
            match *v {
                MachineValue::ExplicitShadow => {
                    stack = stack.offset((fsm.shadow_size / 8) as isize);
                }
                MachineValue::Undefined => {
                    stack = stack.offset(1);
                }
                MachineValue::Vmctx => {
                    stack = stack.offset(1);
                }
                MachineValue::VmctxDeref(_) => {
                    stack = stack.offset(1);
                }
                MachineValue::PreserveRegister(idx) => {
                    // A register saved in this frame; remember its value so it
                    // can be used when decoding the caller's frame.
                    known_registers[idx.0] = Some(*stack);
                    stack = stack.offset(1);
                }
                MachineValue::CopyStackBPRelative(_) => {
                    stack = stack.offset(1);
                }
                MachineValue::WasmStack(idx) => {
                    wasm_stack[idx] = Some(*stack);
                    stack = stack.offset(1);
                }
                MachineValue::WasmLocal(idx) => {
                    wasm_locals[idx] = Some(*stack);
                    stack = stack.offset(1);
                }
                MachineValue::TwoHalves(ref inner) => {
                    // One 64-bit slot packing two 32-bit values:
                    // inner.0 is the low half, inner.1 the high half.
                    let v = *stack;
                    stack = stack.offset(1);
                    match inner.0 {
                        MachineValue::WasmStack(idx) => {
                            wasm_stack[idx] = Some(v & 0xffffffffu64);
                        }
                        MachineValue::WasmLocal(idx) => {
                            wasm_locals[idx] = Some(v & 0xffffffffu64);
                        }
                        MachineValue::VmctxDeref(_) => {}
                        MachineValue::Undefined => {}
                        _ => unimplemented!("TwoHalves.0 (read)"),
                    }
                    match inner.1 {
                        MachineValue::WasmStack(idx) => {
                            wasm_stack[idx] = Some(v >> 32);
                        }
                        MachineValue::WasmLocal(idx) => {
                            wasm_locals[idx] = Some(v >> 32);
                        }
                        MachineValue::VmctxDeref(_) => {}
                        MachineValue::Undefined => {}
                        _ => unimplemented!("TwoHalves.1 (read)"),
                    }
                }
            }
        }

        // Values spilled into the caller's frame, addressed past the saved
        // frame pointer and return address.
        for (offset, v) in state.prev_frame.iter() {
            let offset = (*offset + 2) as isize; // (saved_rbp, return_address)
            match *v {
                MachineValue::WasmStack(idx) => {
                    wasm_stack[idx] = Some(*stack.offset(offset));
                }
                MachineValue::WasmLocal(idx) => {
                    wasm_locals[idx] = Some(*stack.offset(offset));
                }
                _ => unreachable!("values in prev frame can only be stack/local"),
            }
        }
        stack = stack.offset(1); // saved_rbp

        // Drop the private (non-observable) portion of the wasm stack.
        wasm_stack.truncate(
            wasm_stack
                .len()
                .checked_sub(state.wasm_stack_private_depth)
                .unwrap(),
        );

        let wfs = WasmFunctionStateDump {
            local_function_id: fsm.local_function_id,
            wasm_inst_offset: state.wasm_inst_offset,
            stack: wasm_stack,
            locals: wasm_locals,
        };
        results.push(wfs);
    }

    // `for depth in 0..` only exits via the returns above.
    unreachable!();
}
2019-06-09 21:21:18 +08:00
}