Stack parsing now works with LLVM.

losfair
2019-07-27 02:50:49 +08:00
parent cc4f0e31a6
commit efb5277d1d
5 changed files with 122 additions and 78 deletions

View File

@@ -1,4 +1,4 @@
-use super::stackmap::{self, StackmapRegistry, StkMapRecord};
+use super::stackmap::{self, StackmapRegistry, StkMapRecord, StkSizeRecord};
 use crate::intrinsics::Intrinsics;
 use inkwell::{
     memory_buffer::MemoryBuffer,
@@ -246,6 +246,7 @@ impl LLVMBackend {
         module: Module,
         _intrinsics: Intrinsics,
         stackmaps: &StackmapRegistry,
+        module_info: &ModuleInfo,
     ) -> (Self, LLVMCache) {
         Target::initialize_x86(&InitializationConfig {
             asm_parser: true,
@@ -305,7 +306,6 @@ impl LLVMBackend {
         };
         if raw_stackmap.len() > 0 {
             let map = stackmap::StackMap::parse(raw_stackmap).unwrap();
-            println!("{:?}", map);

             let (code_ptr, code_size) = unsafe {
                 (
@@ -318,40 +318,73 @@ impl LLVMBackend {
                 total_size: code_size,
             };

+            let mut local_func_id_to_addr: Vec<usize> = Vec::new();
+
+            // All local functions.
+            for index in module_info.imported_functions.len()..module_info.func_assoc.len() {
+                let name = if cfg!(target_os = "macos") {
+                    format!("_fn{}", index)
+                } else {
+                    format!("fn{}", index)
+                };
+                let c_str = CString::new(name).unwrap();
+                let ptr = unsafe { get_func_symbol(module, c_str.as_ptr()) };
+                assert!(!ptr.is_null());
+                local_func_id_to_addr.push(ptr as usize);
+            }
+
+            let mut addr_to_size_record: BTreeMap<usize, &StkSizeRecord> = BTreeMap::new();
+            for record in &map.stk_size_records {
+                addr_to_size_record.insert(record.function_address as usize, record);
+            }
+
             let mut map_records: BTreeMap<usize, &StkMapRecord> = BTreeMap::new();
             for r in &map.stk_map_records {
                 map_records.insert(r.patchpoint_id as usize, r);
             }

-            let mut map_record_idx: usize = 0;
-            for size_record in &map.stk_size_records {
-                for _ in 0..size_record.record_count as usize {
-                    let map_record = map_records.get(&map_record_idx).expect("map record not found");
-                    let map_entry = &stackmaps.entries[map_record_idx];
-                    assert_eq!(map_record.patchpoint_id, map_record_idx as u64);
-                    map_record_idx += 1;
-                    map_entry.populate_msm(
+            for (i, entry) in stackmaps.entries.iter().enumerate() {
+                if let Some(map_record) = map_records.get(&i) {
+                    assert_eq!(i, map_record.patchpoint_id as usize);
+                    let addr = local_func_id_to_addr[entry.local_function_id];
+                    let size_record = *addr_to_size_record.get(&addr).expect("size_record not found");
+                    entry.populate_msm(
                         code_ptr as usize,
                         &map,
                         size_record,
                         map_record,
                         &mut msm,
                     );
+                } else {
+                    // TODO: optimized out?
                 }
             }

+            //println!("MSM: {:?}", msm);
+
+            (
+                Self {
+                    module,
+                    buffer: Arc::clone(&buffer),
+                    msm: Some(msm),
+                },
+                LLVMCache { buffer },
+            )
         } else {
             eprintln!("WARNING: No stack map");
+            (
+                Self {
+                    module,
+                    buffer: Arc::clone(&buffer),
+                    msm: None,
+                },
+                LLVMCache { buffer },
+            )
         }
-
-        (
-            Self {
-                module,
-                buffer: Arc::clone(&buffer),
-                msm: None,
-            },
-            LLVMCache { buffer },
-        )
     }

     pub unsafe fn from_buffer(memory: Memory) -> Result<(Self, LLVMCache), String> {
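A note on the rewritten correlation above: LLVM emits one StkSizeRecord per function and one StkMapRecord per surviving patchpoint, but records for optimized-out patchpoints may be missing, so walking the size records positionally (the old map_record_idx counter) can drift out of sync. The new code instead keys map records by patchpoint id and size records by function address, recovering each address from the function's symbol name (note the leading-underscore mangling on macOS). Below is a minimal sketch of that correlation, using simplified stand-in record types rather than the real parser structs:

use std::collections::BTreeMap;

// Simplified stand-ins for the parsed LLVM stackmap records.
#[derive(Debug)]
struct SizeRecord { function_address: u64, stack_size: u64 }
#[derive(Debug)]
struct MapRecord { patchpoint_id: u64 }

fn main() {
    // Addresses recovered by symbol lookup ("fn0" on Linux, "_fn0" on macOS).
    let local_func_id_to_addr: Vec<usize> = vec![0x1000];
    let sizes = [SizeRecord { function_address: 0x1000, stack_size: 24 }];
    let maps = [MapRecord { patchpoint_id: 0 }];

    let addr_to_size_record: BTreeMap<usize, &SizeRecord> =
        sizes.iter().map(|r| (r.function_address as usize, r)).collect();
    let map_records: BTreeMap<usize, &MapRecord> =
        maps.iter().map(|r| (r.patchpoint_id as usize, r)).collect();

    // Entry i corresponds to patchpoint id i and knows its local function id.
    let entries = [(0usize, 0usize)]; // (patchpoint id, local function id)
    for &(i, local_function_id) in &entries {
        if let Some(map_record) = map_records.get(&i) {
            let addr = local_func_id_to_addr[local_function_id];
            let size_record = addr_to_size_record[&addr];
            println!("{:?} pairs with {:?}", map_record, size_record);
        } // else: the patchpoint was optimized out; skip it.
    }
}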

View File

@@ -849,7 +849,12 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
                     &self.locals,
                     state,
                     offset,
-                )
+                );
+                builder.build_call(
+                    intrinsics.trap,
+                    &[],
+                    "trap",
+                );
             }
             builder.build_call(
@@ -2675,7 +2680,7 @@ impl ModuleCodeGenerator<LLVMFunctionCodeGenerator, LLVMBackend, CodegenError>
         let stackmaps = self.stackmaps.borrow();
         let (backend, cache_gen) =
-            LLVMBackend::new(self.module, self.intrinsics.take().unwrap(), &*stackmaps);
+            LLVMBackend::new(self.module, self.intrinsics.take().unwrap(), &*stackmaps, module_info);
         Ok((backend, Box::new(cache_gen)))
     }

View File

@@ -88,9 +88,13 @@ impl StackmapEntry {
         });
         assert_eq!(self.value_semantics.len(), map_record.locations.len());
-        //assert!(size_record.stack_size % 8 == 0); // is this also aligned to 16 bytes?

-        let mut machine_stack_half_layout: Vec<MachineValue> = Vec::new();
+        // System V requires 16-byte alignment before each call instruction.
+        // Considering the saved rbp, we need to ensure that stack_size % 16 always equals 8.
+        assert!(size_record.stack_size % 16 == 8);
+
+        // Layout begins just below the saved rbp. (push rbp; mov rbp, rsp)
+        let mut machine_stack_half_layout: Vec<MachineValue> = vec![MachineValue::Undefined; (size_record.stack_size - 8) as usize / 4];
         let mut regs: Vec<(RegisterIndex, MachineValue)> = vec![];
         let mut stack_constants: HashMap<usize, u64> = HashMap::new();
@@ -150,15 +154,13 @@ impl StackmapEntry {
                             == X64Register::GPR(GPR::RBP)
                     );
                     if loc.offset_or_small_constant >= 0 {
-                        eprintln!("XXX: {}", loc.offset_or_small_constant);
+                        // FIXME: parameters passed on the stack?
+                        //eprintln!("XXX: {}", loc.offset_or_small_constant);
+                    } else {
+                        let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize;
+                        assert!(stack_offset > 0 && stack_offset <= machine_stack_half_layout.len());
+                        machine_stack_half_layout[stack_offset - 1] = mv;
                     }
-                    assert!(loc.offset_or_small_constant < 0);
-                    let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize;
-                    while stack_offset > machine_stack_half_layout.len() {
-                        machine_stack_half_layout.push(MachineValue::Undefined);
-                    }
-                    assert!(stack_offset > 0 && stack_offset <= machine_stack_half_layout.len());
-                    machine_stack_half_layout[stack_offset - 1] = mv;
                 }
                 _ => unreachable!(
                     "Direct location type is not expected for values other than local"
@@ -171,9 +173,6 @@ impl StackmapEntry {
                             == X64Register::GPR(GPR::RBP)
                     );
                     let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize;
-                    while stack_offset > machine_stack_half_layout.len() {
-                        machine_stack_half_layout.push(MachineValue::Undefined);
-                    }
                     assert!(stack_offset > 0 && stack_offset <= machine_stack_half_layout.len());
                     machine_stack_half_layout[stack_offset - 1] = mv;
                 }
@@ -183,23 +182,19 @@ impl StackmapEntry {
         assert_eq!(wasm_stack.len(), self.stack_count);
         assert_eq!(wasm_locals.len(), self.local_count);

-        if machine_stack_half_layout.len() % 2 != 0 {
-            machine_stack_half_layout.push(MachineValue::Undefined);
-        }
-
         let mut machine_stack_layout: Vec<MachineValue> = Vec::with_capacity(machine_stack_half_layout.len() / 2);

         for i in 0..machine_stack_half_layout.len() / 2 {
-            let left = &machine_stack_half_layout[i * 2];
-            let right = &machine_stack_half_layout[i * 2 + 1];
-            let only_left = match *right {
+            let major = &machine_stack_half_layout[i * 2 + 1]; // mod 8 == 0
+            let minor = &machine_stack_half_layout[i * 2]; // mod 8 == 4
+            let only_major = match *minor {
                 MachineValue::Undefined => true,
                 _ => false,
             };
-            if only_left {
-                machine_stack_layout.push(left.clone());
+            if only_major {
+                machine_stack_layout.push(major.clone());
             } else {
-                machine_stack_layout.push(MachineValue::TwoHalves(Box::new((right.clone(), left.clone()))));
+                machine_stack_layout.push(MachineValue::TwoHalves(Box::new((major.clone(), minor.clone()))));
             }
         }
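The new assertions and layout above follow from the System V AMD64 calling convention: rsp must be 16-byte aligned at every call, the call pushes an 8-byte return address, and the reported stack_size covers everything below that return address including the saved rbp, hence stack_size % 16 == 8. Offsets below rbp are tracked in 4-byte half-slots and folded into 8-byte machine slots, with the half at index i * 2 + 1 being the 8-aligned "major" half. A small self-checking sketch of the arithmetic (the concrete stack sizes here are made up for illustration):

// System V AMD64 requires rsp % 16 == 0 at every `call`; the call then
// pushes an 8-byte return address, so a frame whose stack_size counts
// everything below the return address (saved rbp included) must have
// stack_size % 16 == 8.
fn main() {
    for &stack_size in &[24u64, 40, 56] {
        assert_eq!(stack_size % 16, 8);

        // Below the saved rbp, offsets are tracked at 4-byte granularity:
        let half_slots = (stack_size - 8) / 4;

        // Half-slots pair into 8-byte machine slots; index i * 2 + 1 is the
        // 8-aligned "major" half, index i * 2 the "minor" half.
        assert_eq!(half_slots % 2, 0);
        println!("stack_size {} -> {} half-slots, {} slots", stack_size, half_slots, half_slots / 2);
    }
}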

View File

@@ -40,6 +40,7 @@ struct UnwindInfo {
 thread_local! {
     static UNWIND: UnsafeCell<Option<UnwindInfo>> = UnsafeCell::new(None);
+    static CURRENT_CTX: UnsafeCell<*mut vm::Ctx> = UnsafeCell::new(::std::ptr::null_mut());
 }

 struct InterruptSignalMem(*mut u8);
@@ -68,6 +69,15 @@ lazy_static! {
 }
 static INTERRUPT_SIGNAL_DELIVERED: AtomicBool = AtomicBool::new(false);

+pub unsafe fn with_ctx<R, F: FnOnce() -> R>(ctx: *mut vm::Ctx, cb: F) -> R {
+    let addr = CURRENT_CTX.with(|x| x.get());
+    let old = *addr;
+    *addr = ctx;
+    let ret = cb();
+    *addr = old;
+    ret
+}
+
 pub unsafe fn get_wasm_interrupt_signal_mem() -> *mut u8 {
     INTERRUPT_SIGNAL_MEM.0
 }
@@ -209,9 +219,7 @@ extern "C" fn signal_trap_handler(
         _ => {}
     }

-    // TODO: make this safer
-    let ctx = &mut *(fault.known_registers[X64Register::GPR(GPR::R15).to_index().0].unwrap()
-        as *mut vm::Ctx);
+    let ctx: &mut vm::Ctx = &mut **CURRENT_CTX.with(|x| x.get());
     let rsp = fault.known_registers[X64Register::GPR(GPR::RSP).to_index().0].unwrap();

     let msm = (*ctx.module)
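The handler previously recovered the Ctx pointer from the saved r15, a register assignment specific to the singlepass backend; publishing it through a thread-local makes the fault path backend-agnostic, which is what lets the LLVM backend share it. A hedged sketch of the same save/publish/restore pattern, with a plain *mut u32 standing in for *mut vm::Ctx:

use std::cell::UnsafeCell;

thread_local! {
    // Stand-in for CURRENT_CTX: a raw pointer published for the signal
    // handler to read, with *mut u32 replacing *mut vm::Ctx.
    static CURRENT: UnsafeCell<*mut u32> = UnsafeCell::new(std::ptr::null_mut());
}

// Save/publish/restore, mirroring `with_ctx`: keeping the old value makes
// nested invocations restore correctly.
unsafe fn with_current<R>(p: *mut u32, cb: impl FnOnce() -> R) -> R {
    let slot = CURRENT.with(|c| c.get());
    let old = *slot;
    *slot = p;
    let ret = cb();
    *slot = old;
    ret
}

fn main() {
    let mut x = 42u32;
    let handler_view = || {
        // What the handler does: fetch the published pointer, no registers involved.
        let slot = CURRENT.with(|c| c.get());
        unsafe { **slot }
    };
    let seen = unsafe { with_current(&mut x, handler_view) };
    assert_eq!(seen, 42);
    assert!(unsafe { (*CURRENT.with(|c| c.get())).is_null() }); // restored
}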

View File

@@ -113,7 +113,7 @@ struct Run {
     loader: Option<LoaderName>,

     /// Path to previously saved instance image to resume.
-    #[cfg(feature = "backend-singlepass")]
+    #[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
     #[structopt(long = "resume")]
     resume: Option<String>,
@@ -512,10 +512,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
     let start: Func<(), ()> = instance.func("_start").map_err(|e| format!("{:?}", e))?;

-    #[cfg(feature = "backend-singlepass")]
+    #[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
     unsafe {
-        if options.backend == Backend::Singlepass {
-            use wasmer_runtime_core::fault::{catch_unsafe_unwind, ensure_sighandler};
+        if options.backend == Backend::Singlepass || options.backend == Backend::LLVM {
+            use wasmer_runtime_core::fault::{catch_unsafe_unwind, ensure_sighandler, with_ctx};
             use wasmer_runtime_core::state::{
                 x64::invoke_call_return_on_stack, InstanceImage,
             };
@@ -537,29 +537,32 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
             let breakpoints = instance.module.runnable_module.get_breakpoints();

             loop {
-                let ret = if let Some(image) = image.take() {
-                    let msm = instance
-                        .module
-                        .runnable_module
-                        .get_module_state_map()
-                        .unwrap();
-                    let code_base =
-                        instance.module.runnable_module.get_code().unwrap().as_ptr()
-                            as usize;
-                    invoke_call_return_on_stack(
-                        &msm,
-                        code_base,
-                        image,
-                        instance.context_mut(),
-                        breakpoints.clone(),
-                    )
-                    .map(|_| ())
-                } else {
-                    catch_unsafe_unwind(
-                        || start_raw(instance.context_mut()),
-                        breakpoints.clone(),
-                    )
-                };
+                let ctx = instance.context_mut() as *mut _;
+                let ret = with_ctx(ctx, || {
+                    if let Some(image) = image.take() {
+                        let msm = instance
+                            .module
+                            .runnable_module
+                            .get_module_state_map()
+                            .unwrap();
+                        let code_base =
+                            instance.module.runnable_module.get_code().unwrap().as_ptr()
+                                as usize;
+                        invoke_call_return_on_stack(
+                            &msm,
+                            code_base,
+                            image,
+                            instance.context_mut(),
+                            breakpoints.clone(),
+                        )
+                        .map(|_| ())
+                    } else {
+                        catch_unsafe_unwind(
+                            || start_raw(instance.context_mut()),
+                            breakpoints.clone(),
+                        )
+                    }
+                });

                 if let Err(e) = ret {
                     if let Some(new_image) = e.downcast_ref::<InstanceImage>() {
                         let op = interactive_shell(InteractiveShellContext {
@@ -619,18 +622,18 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
     Ok(())
 }

-#[cfg(feature = "backend-singlepass")]
+#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
 struct InteractiveShellContext {
     image: Option<wasmer_runtime_core::state::InstanceImage>,
 }

-#[cfg(feature = "backend-singlepass")]
+#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
 #[derive(Debug)]
 enum ShellExitOperation {
     ContinueWith(wasmer_runtime_core::state::InstanceImage),
 }

-#[cfg(feature = "backend-singlepass")]
+#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
 fn interactive_shell(mut ctx: InteractiveShellContext) -> ShellExitOperation {
     use std::io::Write;
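All the singlepass-only gates in the CLI become two-backend gates via cfg(any(...)). A trivial illustration of the gating pattern; the suspend_supported helper below is hypothetical, not from the codebase:

#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
fn suspend_supported() -> bool { true }

#[cfg(not(any(feature = "backend-singlepass", feature = "backend-llvm")))]
fn suspend_supported() -> bool { false }

fn main() {
    // With either feature enabled, the resume/interactive machinery compiles in.
    println!("suspend/resume available: {}", suspend_supported());
}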