mirror of https://github.com/fluencelabs/wasmer
synced 2025-06-22 05:01:33 +00:00
Stack parsing now works with LLVM.
@@ -1,4 +1,4 @@
-use super::stackmap::{self, StackmapRegistry, StkMapRecord};
+use super::stackmap::{self, StackmapRegistry, StkMapRecord, StkSizeRecord};
 use crate::intrinsics::Intrinsics;
 use inkwell::{
     memory_buffer::MemoryBuffer,
@@ -246,6 +246,7 @@ impl LLVMBackend {
         module: Module,
         _intrinsics: Intrinsics,
         stackmaps: &StackmapRegistry,
+        module_info: &ModuleInfo,
     ) -> (Self, LLVMCache) {
         Target::initialize_x86(&InitializationConfig {
             asm_parser: true,
@@ -305,7 +306,6 @@ impl LLVMBackend {
         };
         if raw_stackmap.len() > 0 {
             let map = stackmap::StackMap::parse(raw_stackmap).unwrap();
-            println!("{:?}", map);

             let (code_ptr, code_size) = unsafe {
                 (
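Note: `raw_stackmap` is the raw byte contents of LLVM's `__llvm_stackmaps` section, and `StackMap::parse` has to walk LLVM's documented stackmap layout (format version 3 in this LLVM generation). A rough sketch of the structures involved; field names are illustrative rather than wasmer's exact definitions:

```rust
// Illustrative sketch of the LLVM stackmap section layout (format v3).
// See https://llvm.org/docs/StackMaps.html; names are approximate, not
// necessarily the fields of wasmer's stackmap module.
#[repr(C)]
struct StackMapHeader {
    version: u8,        // 3 for this generation of LLVM
    _reserved8: u8,
    _reserved16: u16,
    num_functions: u32, // number of StkSizeRecords that follow
    num_constants: u32, // number of 64-bit large constants
    num_records: u32,   // number of StkMapRecords
}

#[repr(C)]
struct StkSizeRecordRaw {
    function_address: u64, // where the function was emitted
    stack_size: u64,       // its frame size in bytes
    record_count: u64,     // how many StkMapRecords it owns
}

// Each StkMapRecord then carries a patchpoint id, an instruction offset,
// and variable-length arrays of value locations and live-out registers.
```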
@@ -318,32 +318,64 @@ impl LLVMBackend {
             total_size: code_size,
         };

+        let mut local_func_id_to_addr: Vec<usize> = Vec::new();
+
+        // All local functions.
+        for index in module_info.imported_functions.len()..module_info.func_assoc.len() {
+            let name = if cfg!(target_os = "macos") {
+                format!("_fn{}", index)
+            } else {
+                format!("fn{}", index)
+            };
+
+            let c_str = CString::new(name).unwrap();
+            let ptr = unsafe { get_func_symbol(module, c_str.as_ptr()) };
+
+            assert!(!ptr.is_null());
+            local_func_id_to_addr.push(ptr as usize);
+        }
+
+        let mut addr_to_size_record: BTreeMap<usize, &StkSizeRecord> = BTreeMap::new();
+
+        for record in &map.stk_size_records {
+            addr_to_size_record.insert(record.function_address as usize, record);
+        }
+
         let mut map_records: BTreeMap<usize, &StkMapRecord> = BTreeMap::new();

         for r in &map.stk_map_records {
             map_records.insert(r.patchpoint_id as usize, r);
         }

-        let mut map_record_idx: usize = 0;
-        for size_record in &map.stk_size_records {
-            for _ in 0..size_record.record_count as usize {
-                let map_record = map_records.get(&map_record_idx).expect("map record not found");
-                let map_entry = &stackmaps.entries[map_record_idx];
-                assert_eq!(map_record.patchpoint_id, map_record_idx as u64);
-                map_record_idx += 1;
-
-                map_entry.populate_msm(
+        for (i, entry) in stackmaps.entries.iter().enumerate() {
+            if let Some(map_record) = map_records.get(&i) {
+                assert_eq!(i, map_record.patchpoint_id as usize);
+                let addr = local_func_id_to_addr[entry.local_function_id];
+                let size_record = *addr_to_size_record.get(&addr).expect("size_record not found");
+                entry.populate_msm(
                     code_ptr as usize,
                     &map,
                     size_record,
                     map_record,
                     &mut msm,
                 );
             }
         }
-        } else {
-            eprintln!("WARNING: No stack map");
-            // TODO: optimized out?
-        }

             //println!("MSM: {:?}", msm);

             (
                 Self {
                     module,
                     buffer: Arc::clone(&buffer),
                     msm: Some(msm),
                 },
                 LLVMCache { buffer },
             )
+        } else {
+            eprintln!("WARNING: No stack map");
+            (
+                Self {
+                    module,
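Note: the old loop assumed map records appear exactly in patchpoint-id order; the new code keys everything explicitly instead. `StkMapRecord`s are indexed by patchpoint id, `StkSizeRecord`s by function address, and the addresses are recovered by looking up the emitted `fn{index}` symbols (with the Mach-O `_` prefix on macOS) in the JITed module. A reduced sketch of the same matching idea, with simplified stand-in types and `populate_msm` elided:

```rust
use std::collections::BTreeMap;

// Simplified stand-ins for wasmer's stackmap types.
struct SizeRec { function_address: usize }
struct MapRec { patchpoint_id: u64 }
struct Entry { local_function_id: usize }

fn match_records(entries: &[Entry], func_addrs: &[usize], sizes: &[SizeRec], maps: &[MapRec]) {
    // Key size records by emitted function address...
    let by_addr: BTreeMap<usize, &SizeRec> =
        sizes.iter().map(|r| (r.function_address, r)).collect();
    // ...and map records by the patchpoint id LLVM preserved for us.
    let by_id: BTreeMap<usize, &MapRec> =
        maps.iter().map(|r| (r.patchpoint_id as usize, r)).collect();

    for (i, entry) in entries.iter().enumerate() {
        // An entry with no record was presumably optimized out; skip it.
        if let Some(map_rec) = by_id.get(&i) {
            let addr = func_addrs[entry.local_function_id];
            let size_rec = by_addr.get(&addr).expect("size record not found");
            // entry.populate_msm(code_ptr, &map, size_rec, map_rec, &mut msm);
            let _ = (map_rec, size_rec);
        }
    }
}
```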
@@ -353,6 +385,7 @@ impl LLVMBackend {
                 LLVMCache { buffer },
             )
+        }
     }

     pub unsafe fn from_buffer(memory: Memory) -> Result<(Self, LLVMCache), String> {
         let callbacks = get_callbacks();

@@ -849,7 +849,12 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
                     &self.locals,
                     state,
                     offset,
                 )
             );
+            builder.build_call(
+                intrinsics.trap,
+                &[],
+                "trap",
+            );
         }

         builder.build_call(
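Note: after recording the machine state for the breakpoint, the generator now emits a call to the trap intrinsic so execution actually faults at that exact point, which presumably lets the fault handler map the faulting location back through the stackmap. A hedged inkwell sketch of declaring and calling `llvm.trap`, which is plausibly what `intrinsics.trap` wraps; the API shapes follow the inkwell of this era and may differ in newer versions:

```rust
use inkwell::context::Context;

// Minimal sketch: declare void @llvm.trap() and call it from a function.
// This mirrors the `builder.build_call(intrinsics.trap, &[], "trap")` above.
fn build_trapping_function() {
    let context = Context::create();
    let module = context.create_module("demo");
    let builder = context.create_builder();

    let void_fn = context.void_type().fn_type(&[], false);
    let trap = module.add_function("llvm.trap", void_fn, None); // the intrinsic
    let f = module.add_function("always_trap", void_fn, None);

    let entry = context.append_basic_block(&f, "entry");
    builder.position_at_end(&entry);
    builder.build_call(trap, &[], "trap");
    builder.build_unreachable(); // llvm.trap does not return
}
```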
@@ -2675,7 +2680,7 @@ impl ModuleCodeGenerator<LLVMFunctionCodeGenerator, LLVMBackend, CodegenError>
         let stackmaps = self.stackmaps.borrow();

         let (backend, cache_gen) =
-            LLVMBackend::new(self.module, self.intrinsics.take().unwrap(), &*stackmaps);
+            LLVMBackend::new(self.module, self.intrinsics.take().unwrap(), &*stackmaps, module_info);
         Ok((backend, Box::new(cache_gen)))
     }

@@ -88,9 +88,13 @@ impl StackmapEntry {
         });

         assert_eq!(self.value_semantics.len(), map_record.locations.len());
-        //assert!(size_record.stack_size % 8 == 0); // is this also aligned to 16 bytes?

-        let mut machine_stack_half_layout: Vec<MachineValue> = Vec::new();
+        // System V requires 16-byte alignment before each call instruction.
+        // Considering the saved rbp, we need to ensure that stack_size % 16 always equals 8.
+        assert!(size_record.stack_size % 16 == 8);
+
+        // Layout begins just below the saved rbp. (push rbp; mov rbp, rsp)
+        let mut machine_stack_half_layout: Vec<MachineValue> = vec![MachineValue::Undefined; (size_record.stack_size - 8) as usize / 4];
         let mut regs: Vec<(RegisterIndex, MachineValue)> = vec![];
         let mut stack_constants: HashMap<usize, u64> = HashMap::new();

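Note: the invariant is worth a worked check. The System V ABI wants rsp 16-byte aligned at every `call`, and the call itself pushes an 8-byte return address, so at function entry rsp % 16 == 8; if `stack_size` then counts everything the frame pushes or allocates (saved rbp included, return address excluded, which is what the assert implies), later calls stay aligned exactly when stack_size % 16 == 8. The pre-sized layout covers the (stack_size - 8) bytes below the saved rbp in 4-byte halves. A small sketch, not wasmer code:

```rust
// Worked check of the alignment invariant, assuming stack_size counts the
// saved rbp but not the return address (which is what the assert implies).
fn call_aligned(stack_size: u64) -> bool {
    (8 + stack_size) % 16 == 0 // 8 = return address already on the stack
}

fn half_slots(stack_size: u64) -> usize {
    ((stack_size - 8) / 4) as usize // 4-byte halves below the saved rbp
}

fn main() {
    assert!(call_aligned(40));         // 40 % 16 == 8, so 8 + 40 = 48 is aligned
    assert_eq!(half_slots(40), 8);     // 32 bytes of frame = 8 half-slots
    assert_eq!(half_slots(40) / 2, 4); // which pair into 4 full 8-byte slots
    assert!(half_slots(40) % 2 == 0);  // always even, given the assert above
}
```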
@@ -150,16 +154,14 @@ impl StackmapEntry {
                             == X64Register::GPR(GPR::RBP)
                     );
-                    if loc.offset_or_small_constant >= 0 {
-                        eprintln!("XXX: {}", loc.offset_or_small_constant);
-                    }
+                    assert!(loc.offset_or_small_constant < 0);
+                    // FIXME: parameters passed on stack?
+                    //eprintln!("XXX: {}", loc.offset_or_small_constant);
                 } else {
                     let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize;
-                    while stack_offset > machine_stack_half_layout.len() {
-                        machine_stack_half_layout.push(MachineValue::Undefined);
-                    }
+                    assert!(stack_offset > 0 && stack_offset <= machine_stack_half_layout.len());
                     machine_stack_half_layout[stack_offset - 1] = mv;
                 }
             }
             _ => unreachable!(
                 "Direct location type is not expected for values other than local"
             ),
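Note: with the layout pre-sized, an rbp-relative location turns straight into an index: offsets are negative multiples of 4 below the saved rbp, and `(-offset)/4 - 1` picks the half-slot (index 0 sits at rbp-4). The asserts replace the old grow-on-demand `while` loops, which the pre-sized vector makes unnecessary. A tiny illustrative helper, not wasmer's code:

```rust
// Illustrative mapping from an rbp-relative byte offset to a half-slot index,
// matching machine_stack_half_layout[stack_offset - 1] = mv above.
fn half_slot_index(offset_from_rbp: i32) -> usize {
    assert!(offset_from_rbp < 0 && offset_from_rbp % 4 == 0);
    ((-offset_from_rbp) / 4) as usize - 1
}

fn main() {
    assert_eq!(half_slot_index(-4), 0);  // rbp-4:  minor half (offset % 8 == 4)
    assert_eq!(half_slot_index(-8), 1);  // rbp-8:  major half (offset % 8 == 0)
    assert_eq!(half_slot_index(-12), 2); // rbp-12: minor half of the next slot
}
```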
@@ -171,9 +173,6 @@ impl StackmapEntry {
                             == X64Register::GPR(GPR::RBP)
                     );
                     let stack_offset = ((-loc.offset_or_small_constant) / 4) as usize;
-                    while stack_offset > machine_stack_half_layout.len() {
-                        machine_stack_half_layout.push(MachineValue::Undefined);
-                    }
                     assert!(stack_offset > 0 && stack_offset <= machine_stack_half_layout.len());
                     machine_stack_half_layout[stack_offset - 1] = mv;
                 }
@@ -183,23 +182,19 @@ impl StackmapEntry {
         assert_eq!(wasm_stack.len(), self.stack_count);
         assert_eq!(wasm_locals.len(), self.local_count);

-        if machine_stack_half_layout.len() % 2 != 0 {
-            machine_stack_half_layout.push(MachineValue::Undefined);
-        }
-
         let mut machine_stack_layout: Vec<MachineValue> = Vec::with_capacity(machine_stack_half_layout.len() / 2);

         for i in 0..machine_stack_half_layout.len() / 2 {
-            let left = &machine_stack_half_layout[i * 2];
-            let right = &machine_stack_half_layout[i * 2 + 1];
-            let only_left = match *right {
+            let major = &machine_stack_half_layout[i * 2 + 1]; // mod 8 == 0
+            let minor = &machine_stack_half_layout[i * 2]; // mod 8 == 4
+            let only_major = match *minor {
                 MachineValue::Undefined => true,
                 _ => false,
             };
-            if only_left {
-                machine_stack_layout.push(left.clone());
+            if only_major {
+                machine_stack_layout.push(major.clone());
             } else {
-                machine_stack_layout.push(MachineValue::TwoHalves(Box::new((right.clone(), left.clone()))));
+                machine_stack_layout.push(MachineValue::TwoHalves(Box::new((major.clone(), minor.clone()))));
             }
         }

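Note: the rename from left/right to minor/major makes the addressing explicit: within each 8-byte machine slot, the half at byte offset % 8 == 0 is the major half and the one at offset % 8 == 4 the minor half. A slot whose minor half is untouched collapses to its major value; otherwise both halves are preserved as a `TwoHalves` pair. A reduced, runnable sketch of the pairing with a simplified value type:

```rust
// Reduced sketch of the half-slot pairing above; Val stands in for MachineValue.
#[derive(Clone, Debug, PartialEq)]
enum Val {
    Undefined,
    WasmStack(usize),
    TwoHalves(Box<(Val, Val)>), // (major, minor)
}

fn pair_halves(halves: &[Val]) -> Vec<Val> {
    assert!(halves.len() % 2 == 0); // guaranteed by the pre-sized layout
    (0..halves.len() / 2)
        .map(|i| {
            let minor = &halves[i * 2];     // byte offset % 8 == 4
            let major = &halves[i * 2 + 1]; // byte offset % 8 == 0
            if *minor == Val::Undefined {
                major.clone() // the slot is fully described by its major half
            } else {
                Val::TwoHalves(Box::new((major.clone(), minor.clone())))
            }
        })
        .collect()
}

fn main() {
    let halves = vec![Val::Undefined, Val::WasmStack(0), Val::WasmStack(1), Val::WasmStack(2)];
    let slots = pair_halves(&halves);
    assert_eq!(slots[0], Val::WasmStack(0)); // only the major half was defined
    assert!(matches!(slots[1], Val::TwoHalves(_)));
}
```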
@@ -40,6 +40,7 @@ struct UnwindInfo {

 thread_local! {
     static UNWIND: UnsafeCell<Option<UnwindInfo>> = UnsafeCell::new(None);
+    static CURRENT_CTX: UnsafeCell<*mut vm::Ctx> = UnsafeCell::new(::std::ptr::null_mut());
 }

 struct InterruptSignalMem(*mut u8);
@@ -68,6 +69,15 @@ lazy_static! {
 }
 static INTERRUPT_SIGNAL_DELIVERED: AtomicBool = AtomicBool::new(false);

+pub unsafe fn with_ctx<R, F: FnOnce() -> R>(ctx: *mut vm::Ctx, cb: F) -> R {
+    let addr = CURRENT_CTX.with(|x| x.get());
+    let old = *addr;
+    *addr = ctx;
+    let ret = cb();
+    *addr = old;
+    ret
+}
+
 pub unsafe fn get_wasm_interrupt_signal_mem() -> *mut u8 {
     INTERRUPT_SIGNAL_MEM.0
 }
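Note: `with_ctx` gives the signal handler a way to find the live `vm::Ctx` without relying on a pinned register: the pointer is stashed in the `CURRENT_CTX` thread-local for the dynamic extent of the closure, and the previous value is restored on exit, so re-entrant wasm -> host -> wasm calls nest correctly. A usage sketch; `run_wasm_entry` is a hypothetical stand-in, and the real call sites are in the CLI changes below:

```rust
// Usage sketch for with_ctx. `run_wasm_entry` is hypothetical; the actual
// callers wrap invoke_call_return_on_stack / start_raw, as shown below.
unsafe fn call_under_ctx(ctx: *mut vm::Ctx) -> u64 {
    // with_ctx restores the previous CURRENT_CTX on exit, so nested
    // wasm -> host -> wasm invocations each see the innermost context.
    with_ctx(ctx, || {
        // While this closure runs, signal_trap_handler can read `ctx` out of
        // CURRENT_CTX, even though LLVM-generated code pins no register.
        run_wasm_entry()
    })
}
```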
@@ -209,9 +219,7 @@ extern "C" fn signal_trap_handler(
         _ => {}
     }

-    // TODO: make this safer
-    let ctx = &mut *(fault.known_registers[X64Register::GPR(GPR::R15).to_index().0].unwrap()
-        as *mut vm::Ctx);
+    let ctx: &mut vm::Ctx = &mut **CURRENT_CTX.with(|x| x.get());
     let rsp = fault.known_registers[X64Register::GPR(GPR::RSP).to_index().0].unwrap();

     let msm = (*ctx.module)
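Note: this is the payoff of `CURRENT_CTX`. Singlepass-generated code keeps the `vm::Ctx` pointer pinned in R15, so the handler could recover it from the saved register file, but LLVM-generated code reserves no such register. Reading the pointer from the thread-local (valid for exactly the duration of the surrounding `with_ctx` scope) works for both backends. A sketch of the handler-side lookup:

```rust
// Sketch of the handler-side lookup replacing the R15 read. Only valid while
// a with_ctx scope is live, which is the only time this signal can fire from
// wasm code.
unsafe fn ctx_for_fault_handler() -> *mut vm::Ctx {
    let ptr = *CURRENT_CTX.with(|x| x.get());
    debug_assert!(!ptr.is_null(), "fault taken outside a with_ctx scope");
    ptr
}
```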
@@ -113,7 +113,7 @@ struct Run {
     loader: Option<LoaderName>,

     /// Path to previously saved instance image to resume.
-    #[cfg(feature = "backend-singlepass")]
+    #[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
     #[structopt(long = "resume")]
     resume: Option<String>,

@@ -512,10 +512,10 @@ fn execute_wasm(options: &Run) -> Result<(), String> {

     let start: Func<(), ()> = instance.func("_start").map_err(|e| format!("{:?}", e))?;

-    #[cfg(feature = "backend-singlepass")]
+    #[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
     unsafe {
-        if options.backend == Backend::Singlepass {
-            use wasmer_runtime_core::fault::{catch_unsafe_unwind, ensure_sighandler};
+        if options.backend == Backend::Singlepass || options.backend == Backend::LLVM {
+            use wasmer_runtime_core::fault::{catch_unsafe_unwind, ensure_sighandler, with_ctx};
             use wasmer_runtime_core::state::{
                 x64::invoke_call_return_on_stack, InstanceImage,
             };
@@ -537,7 +537,9 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
             let breakpoints = instance.module.runnable_module.get_breakpoints();

             loop {
-                let ret = if let Some(image) = image.take() {
+                let ctx = instance.context_mut() as *mut _;
+                let ret = with_ctx(ctx, || {
+                    if let Some(image) = image.take() {
                     let msm = instance
                         .module
                         .runnable_module
@@ -559,7 +561,8 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
                         || start_raw(instance.context_mut()),
                         breakpoints.clone(),
                     )
-                };
+                    }
+                });
                 if let Err(e) = ret {
                     if let Some(new_image) = e.downcast_ref::<InstanceImage>() {
                         let op = interactive_shell(InteractiveShellContext {
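Note: the `};` -> `}` + `});` change closes the new `with_ctx` closure around the whole resume-or-start block, so both paths run with `CURRENT_CTX` set. The error path right after is worth showing: `catch_unsafe_unwind` yields its payload as a `Box<dyn Any>`, and a payload that downcasts to `InstanceImage` means "suspended, resumable"; anything else is an ordinary trap. A runnable sketch of that dispatch with stand-in types:

```rust
use std::any::Any;

// Stand-in for wasmer_runtime_core::state::InstanceImage.
#[derive(Debug)]
struct InstanceImage;

// Runnable sketch of the payload dispatch used in the loop above.
fn handle_unwind(payload: Box<dyn Any>) {
    if let Some(_image) = payload.downcast_ref::<InstanceImage>() {
        println!("suspended: got an instance image; offer the interactive shell");
    } else {
        println!("ordinary trap/abort: report and exit");
    }
}

fn main() {
    handle_unwind(Box::new(InstanceImage));       // resumable suspension
    handle_unwind(Box::new("unrelated payload")); // plain trap
}
```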
@@ -619,18 +622,18 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
     Ok(())
 }

-#[cfg(feature = "backend-singlepass")]
+#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
 struct InteractiveShellContext {
     image: Option<wasmer_runtime_core::state::InstanceImage>,
 }

-#[cfg(feature = "backend-singlepass")]
+#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
 #[derive(Debug)]
 enum ShellExitOperation {
     ContinueWith(wasmer_runtime_core::state::InstanceImage),
 }

-#[cfg(feature = "backend-singlepass")]
+#[cfg(any(feature = "backend-singlepass", feature = "backend-llvm"))]
 fn interactive_shell(mut ctx: InteractiveShellContext) -> ShellExitOperation {
     use std::io::Write;
