Fix a few issues from PR comments.
@@ -70,6 +70,8 @@ extern "C" {
     ) -> bool;
 }
 
+static SIGNAL_HANDLER_INSTALLED: Once = Once::new();
+
 fn get_callbacks() -> Callbacks {
     extern "C" fn alloc_memory(
         size: usize,
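For context, a minimal standalone sketch (not the crate's code) of the `Once` one-time-initialization pattern this hunk hoists to module scope:

```rust
use std::sync::Once;

static SIGNAL_HANDLER_INSTALLED: Once = Once::new();

fn ensure_installed() {
    // call_once runs the closure at most once, even under concurrent callers;
    // later calls block until the first finishes, then return immediately.
    SIGNAL_HANDLER_INSTALLED.call_once(|| {
        println!("installing signal handler (runs exactly once)");
    });
}

fn main() {
    ensure_installed();
    ensure_installed(); // second call is a no-op
}
```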
@@ -218,16 +220,6 @@ impl LLVMBackend {
             )
         };
 
-        // Uncomment this to make spectests pass.
-        // TODO: fix this
-        /*
-
-        static SIGNAL_HANDLER_INSTALLED: Once = Once::new();
-
-        SIGNAL_HANDLER_INSTALLED.call_once(|| unsafe {
-            crate::platform::install_signal_handler();
-        });*/
-
         if res != LLVMResult::OK {
            panic!("failed to load object")
         }
@@ -235,7 +227,7 @@ impl LLVMBackend {
         let buffer = Arc::new(Buffer::LlvmMemory(memory_buffer));
 
         let raw_stackmap = unsafe {
-            ::std::slice::from_raw_parts(
+            std::slice::from_raw_parts(
                 llvm_backend_get_stack_map_ptr(module),
                 llvm_backend_get_stack_map_size(module),
             )
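The change here is purely cosmetic (`::std` → `std`), but for readers unfamiliar with the call being touched: `slice::from_raw_parts` has a safety contract the surrounding `unsafe` block is vouching for. A simplified, runnable sketch:

```rust
fn main() {
    let data = [1u8, 2, 3, 4];
    let (ptr, len) = (data.as_ptr(), data.len());
    // Safety: ptr is non-null, properly aligned, valid for `len` bytes,
    // and `data` outlives the borrowed slice.
    let bytes: &[u8] = unsafe { std::slice::from_raw_parts(ptr, len) };
    assert_eq!(bytes, &[1, 2, 3, 4]);
}
```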
@@ -281,8 +273,8 @@ impl LLVMBackend {
 
         let mut map_records: BTreeMap<usize, &StkMapRecord> = BTreeMap::new();
 
-        for r in &map.stk_map_records {
-            map_records.insert(r.patchpoint_id as usize, r);
+        for record in &map.stk_map_records {
+            map_records.insert(record.patchpoint_id as usize, record);
         }
 
         for ((start_id, start_entry), (end_id, end_entry)) in stackmaps
@@ -314,7 +306,7 @@ impl LLVMBackend {
                     &mut msm,
                 );
             } else {
-                // TODO: optimized out?
+                // The record is optimized out.
             }
         }
 
@@ -329,8 +321,6 @@ impl LLVMBackend {
                 })
                 .collect();
 
-            //println!("MSM: {:?}", msm);
-
             (
                 Self {
                     module,
@@ -341,7 +331,7 @@ impl LLVMBackend {
                 LLVMCache { buffer },
             )
         } else {
-            eprintln!("WARNING: No stack map");
+            // This module contains no functions so no stackmaps.
             (
                 Self {
                     module,
@@ -366,8 +356,6 @@ impl LLVMBackend {
             return Err("failed to load object".to_string());
         }
 
-        static SIGNAL_HANDLER_INSTALLED: Once = Once::new();
-
         SIGNAL_HANDLER_INSTALLED.call_once(|| {
             crate::platform::install_signal_handler();
         });
@@ -431,12 +419,16 @@ impl RunnableModule for LLVMBackend {
             mem::transmute(symbol)
         };
 
+        SIGNAL_HANDLER_INSTALLED.call_once(|| unsafe {
+            crate::platform::install_signal_handler();
+        });
+
         Some(unsafe { Wasm::from_raw_parts(trampoline, invoke_trampoline, None) })
     }
 
     fn get_code(&self) -> Option<&[u8]> {
         Some(unsafe {
-            ::std::slice::from_raw_parts(
+            std::slice::from_raw_parts(
                 llvm_backend_get_code_ptr(self.module),
                 llvm_backend_get_code_size(self.module),
             )
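The `mem::transmute(symbol)` in the context above turns a raw symbol address into a callable function pointer. A contrived but runnable sketch of the same cast, using a local function's address in place of a JIT symbol:

```rust
extern "C" fn add_one(x: u64) -> u64 {
    x + 1
}

fn main() {
    // Pretend `addr` came from a JIT's symbol table, as `symbol` does above.
    let addr = add_one as usize;
    let f: extern "C" fn(u64) -> u64 = unsafe { std::mem::transmute(addr) };
    assert_eq!(f(41), 42);
}
```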
@@ -185,12 +185,12 @@ impl Drop for CodeMemory {
 impl Deref for CodeMemory {
     type Target = [u8];
     fn deref(&self) -> &[u8] {
-        unsafe { ::std::slice::from_raw_parts(self.ptr, self.size) }
+        unsafe { std::slice::from_raw_parts(self.ptr, self.size) }
     }
 }
 
 impl DerefMut for CodeMemory {
     fn deref_mut(&mut self) -> &mut [u8] {
-        unsafe { ::std::slice::from_raw_parts_mut(self.ptr, self.size) }
+        unsafe { std::slice::from_raw_parts_mut(self.ptr, self.size) }
     }
 }
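The `CodeMemory` impls above forward to raw-pointer slices; the pattern is easier to see on an owned buffer. A hypothetical `Bytes` wrapper (not the crate's type, which wraps mmap'd memory) showing how `Deref`/`DerefMut` let a wrapper be used as a slice:

```rust
use std::ops::{Deref, DerefMut};

struct Bytes {
    buf: Vec<u8>,
}

impl Deref for Bytes {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.buf
    }
}

impl DerefMut for Bytes {
    fn deref_mut(&mut self) -> &mut [u8] {
        &mut self.buf
    }
}

fn main() {
    let mut b = Bytes { buf: vec![0; 4] };
    b[0] = 0x90; // DerefMut: index assignment as if it were a slice
    assert_eq!(b.len(), 4); // Deref: slice methods available directly
}
```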
@@ -154,7 +154,6 @@ impl ModuleStateMap {
         self.lookup_ip(ip, base, |fsm| &fsm.call_offsets)
     }
 
-    #[warn(dead_code)]
     pub fn lookup_trappable_ip(
         &self,
         ip: usize,
@@ -163,7 +162,6 @@ impl ModuleStateMap {
         self.lookup_ip(ip, base, |fsm| &fsm.trappable_offsets)
     }
 
-    #[warn(dead_code)]
     pub fn lookup_loop_ip(
         &self,
         ip: usize,
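Dropping these attributes is presumably sound because `#[warn(dead_code)]` only re-enables a lint that is on by default; it never silences anything. A one-line illustration of the attribute that would actually suppress the lint:

```rust
// `#[warn(dead_code)]` merely restores the default lint level, so it was
// likely a typo for `#[allow(dead_code)]`, which is what silences the lint.
#[allow(dead_code)]
fn only_used_in_some_builds() {}

fn main() {}
```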
@@ -535,30 +533,30 @@ pub mod x64 {
                     match inner.0 {
                         MachineValue::WasmStack(x) => match state.wasm_stack[x] {
                             WasmAbstractValue::Const(x) => {
-                                assert!(x <= ::std::u32::MAX as u64);
+                                assert!(x <= std::u32::MAX as u64);
                                 stack[stack_offset] |= x;
                             }
                             WasmAbstractValue::Runtime => {
                                 let v = f.stack[x].unwrap();
-                                assert!(v <= ::std::u32::MAX as u64);
+                                assert!(v <= std::u32::MAX as u64);
                                 stack[stack_offset] |= v;
                             }
                         },
                         MachineValue::WasmLocal(x) => match fsm.locals[x] {
                             WasmAbstractValue::Const(x) => {
-                                assert!(x <= ::std::u32::MAX as u64);
+                                assert!(x <= std::u32::MAX as u64);
                                 stack[stack_offset] |= x;
                             }
                             WasmAbstractValue::Runtime => {
                                 let v = f.locals[x].unwrap();
-                                assert!(v <= ::std::u32::MAX as u64);
+                                assert!(v <= std::u32::MAX as u64);
                                 stack[stack_offset] |= v;
                             }
                         },
                         MachineValue::VmctxDeref(ref seq) => {
                             stack[stack_offset] |=
                                 compute_vmctx_deref(vmctx as *const Ctx, seq)
-                                    & (::std::u32::MAX as u64);
+                                    & (std::u32::MAX as u64);
                         }
                         MachineValue::Undefined => {}
                         _ => unimplemented!("TwoHalves.0"),
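The assertions in this hunk guard the `TwoHalves` packing: each value must fit in 32 bits so the pair (`inner.0`, `inner.1`) can share one 64-bit stack slot. A minimal sketch of the same packing, with a hypothetical `pack` helper rather than the crate's code:

```rust
fn pack(lo: u64, hi: u64) -> u64 {
    // Same invariants as the asserts above: both halves must fit in u32.
    assert!(lo <= std::u32::MAX as u64);
    assert!(hi <= std::u32::MAX as u64);
    let mut slot = 0u64;
    slot |= lo; // lower half, like `inner.0`
    slot |= hi << 32; // upper half, like `inner.1`
    slot
}

fn main() {
    assert_eq!(pack(0xAABB, 0xCCDD), 0x0000_CCDD_0000_AABB);
}
```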
@@ -566,30 +564,30 @@ pub mod x64 {
                     match inner.1 {
                         MachineValue::WasmStack(x) => match state.wasm_stack[x] {
                             WasmAbstractValue::Const(x) => {
-                                assert!(x <= ::std::u32::MAX as u64);
+                                assert!(x <= std::u32::MAX as u64);
                                 stack[stack_offset] |= x << 32;
                             }
                             WasmAbstractValue::Runtime => {
                                 let v = f.stack[x].unwrap();
-                                assert!(v <= ::std::u32::MAX as u64);
+                                assert!(v <= std::u32::MAX as u64);
                                 stack[stack_offset] |= v << 32;
                             }
                         },
                         MachineValue::WasmLocal(x) => match fsm.locals[x] {
                             WasmAbstractValue::Const(x) => {
-                                assert!(x <= ::std::u32::MAX as u64);
+                                assert!(x <= std::u32::MAX as u64);
                                 stack[stack_offset] |= x << 32;
                             }
                             WasmAbstractValue::Runtime => {
                                 let v = f.locals[x].unwrap();
-                                assert!(v <= ::std::u32::MAX as u64);
+                                assert!(v <= std::u32::MAX as u64);
                                 stack[stack_offset] |= v << 32;
                             }
                         },
                         MachineValue::VmctxDeref(ref seq) => {
                             stack[stack_offset] |=
                                 (compute_vmctx_deref(vmctx as *const Ctx, seq)
-                                    & (::std::u32::MAX as u64))
+                                    & (std::u32::MAX as u64))
                                     << 32;
                         }
                         MachineValue::Undefined => {}
@@ -728,7 +726,7 @@ pub mod x64 {
             assert_eq!(vmctx.internal.memory_bound, memory.len());
         }
 
-        ::std::slice::from_raw_parts_mut(
+        std::slice::from_raw_parts_mut(
             vmctx.internal.memory_base,
             vmctx.internal.memory_bound,
         )
@@ -763,7 +761,7 @@ pub mod x64 {
         None
     } else {
         Some(
-            ::std::slice::from_raw_parts(
+            std::slice::from_raw_parts(
                 vmctx.internal.memory_base,
                 vmctx.internal.memory_bound,
             )
@@ -70,7 +70,7 @@ unsafe fn do_optimize(
     }
 }
 
-pub fn run_tiering<F: Fn(InteractiveShellContext) -> ShellExitOperation>(
+pub unsafe fn run_tiering<F: Fn(InteractiveShellContext) -> ShellExitOperation>(
     module_info: &ModuleInfo,
     wasm_binary: &[u8],
     mut resume_image: Option<InstanceImage>,
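Making `run_tiering` an `unsafe fn` shifts the safety obligation to callers and makes the whole body an implicit unsafe context, which is why the interior `unsafe { ... }` wrapper disappears in the next hunk (and why the call site in `execute_wasm` gains one). A tiny illustration of that contract:

```rust
unsafe fn poke(p: *mut u32) {
    // The body of an `unsafe fn` is an implicit unsafe context (in pre-2024
    // editions), so the raw write needs no inner `unsafe {}` block.
    *p = 42;
}

fn main() {
    let mut x = 0u32;
    unsafe { poke(&mut x) }; // the safety obligation now sits at the call site
    assert_eq!(x, 42);
}
```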
@@ -80,157 +80,155 @@ pub fn run_tiering<F: Fn(InteractiveShellContext) -> ShellExitOperation>(
     optimized_backends: Vec<Box<dyn Fn() -> Box<dyn Compiler> + Send>>,
     interactive_shell: F,
 ) -> Result<(), String> {
-    unsafe {
     ensure_sighandler();
 
     let ctx_box = Arc::new(Mutex::new(CtxWrapper(baseline.context_mut() as *mut _)));
     // Ensure that the ctx pointer's lifetime is not longer than Instance's.
     let _deferred_ctx_box_cleanup: Defer<_> = {
         let ctx_box = ctx_box.clone();
         Defer(Some(move || {
             ctx_box.lock().unwrap().0 = ::std::ptr::null_mut();
         }))
     };
     let opt_state = Arc::new(OptimizationState {
         outcome: Mutex::new(None),
     });
 
     {
         let wasm_binary = wasm_binary.to_vec();
         let ctx_box = ctx_box.clone();
         let opt_state = opt_state.clone();
         ::std::thread::spawn(move || {
             for backend in optimized_backends {
                 if !ctx_box.lock().unwrap().0.is_null() {
                     do_optimize(&wasm_binary, backend(), &ctx_box, &opt_state);
                 }
             }
         });
     }
 
     let mut optimized_instances: Vec<Instance> = vec![];
 
     push_code_version(CodeVersion {
         baseline: true,
         msm: baseline
             .module
             .runnable_module
             .get_module_state_map()
             .unwrap(),
         base: baseline.module.runnable_module.get_code().unwrap().as_ptr() as usize,
     });
     let n_versions: Cell<usize> = Cell::new(1);
 
     let _deferred_pop_versions = Defer(Some(|| {
         for _ in 0..n_versions.get() {
             pop_code_version().unwrap();
         }
     }));
 
     loop {
         let new_optimized: Option<&mut Instance> = {
             let mut outcome = opt_state.outcome.lock().unwrap();
             if let Some(x) = outcome.take() {
                 let instance = x
                     .module
                     .instantiate(&import_object)
                     .map_err(|e| format!("Can't instantiate module: {:?}", e))?;
                 // Keep the optimized code alive.
                 optimized_instances.push(instance);
                 optimized_instances.last_mut()
             } else {
                 None
             }
         };
         if let Some(optimized) = new_optimized {
             let base = module_info.imported_functions.len();
             let code_ptr = optimized
                 .module
                 .runnable_module
                 .get_code()
                 .unwrap()
                 .as_ptr() as usize;
             let target_addresses: Vec<usize> = optimized
                 .module
                 .runnable_module
                 .get_local_function_offsets()
                 .unwrap()
                 .into_iter()
                 .map(|x| code_ptr + x)
                 .collect();
             assert_eq!(target_addresses.len(), module_info.func_assoc.len() - base);
             for i in base..module_info.func_assoc.len() {
                 baseline
                     .module
                     .runnable_module
                     .patch_local_function(i - base, target_addresses[i - base]);
             }
 
             push_code_version(CodeVersion {
                 baseline: false,
                 msm: optimized
                     .module
                     .runnable_module
                     .get_module_state_map()
                     .unwrap(),
                 base: optimized
                     .module
                     .runnable_module
                     .get_code()
                     .unwrap()
                     .as_ptr() as usize,
             });
             n_versions.set(n_versions.get() + 1);
 
             baseline.context_mut().local_functions = optimized.context_mut().local_functions;
         }
         // Assuming we do not want to do breakpoint-based debugging on optimized backends.
         let breakpoints = baseline.module.runnable_module.get_breakpoints();
         let ctx = baseline.context_mut() as *mut _;
         let ret = with_ctx(ctx, || {
             if let Some(image) = resume_image.take() {
                 let msm = baseline
                     .module
                     .runnable_module
                     .get_module_state_map()
                     .unwrap();
                 let code_base =
                     baseline.module.runnable_module.get_code().unwrap().as_ptr() as usize;
                 invoke_call_return_on_stack(
                     &msm,
                     code_base,
                     image,
                     baseline.context_mut(),
                     breakpoints.clone(),
                 )
                 .map(|_| ())
             } else {
                 catch_unsafe_unwind(|| start_raw(baseline.context_mut()), breakpoints.clone())
             }
         });
         if let Err(e) = ret {
             if let Ok(new_image) = e.downcast::<InstanceImage>() {
                 // Tier switch event
                 if !was_sigint_triggered_fault() && opt_state.outcome.lock().unwrap().is_some()
                 {
                     resume_image = Some(*new_image);
                     continue;
                 }
                 let op = interactive_shell(InteractiveShellContext {
                     image: Some(*new_image),
                     patched: n_versions.get() > 1,
                 });
                 match op {
                     ShellExitOperation::ContinueWith(new_image) => {
                         resume_image = Some(new_image);
                     }
                 }
             } else {
                 return Err("Error while executing WebAssembly".into());
             }
         } else {
             return Ok(());
         }
     }
-    }
 }
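The `Defer(Some(...))` values in this function are scope guards: the closure runs on drop, so cleanup (nulling the ctx pointer, popping pushed code versions) happens on every exit path, including `?` early returns. A hypothetical minimal `Defer` with the same shape as the one used above:

```rust
struct Defer<F: FnOnce()>(Option<F>);

impl<F: FnOnce()> Drop for Defer<F> {
    fn drop(&mut self) {
        // Run the deferred closure exactly once, when the guard leaves scope.
        if let Some(f) = self.0.take() {
            f();
        }
    }
}

fn main() {
    let _cleanup = Defer(Some(|| println!("cleanup runs at scope exit")));
    println!("body runs first");
}
```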
@@ -277,7 +277,7 @@ impl RunnableModule for X64ExecutionContext {
             let execution_context =
                 ::std::mem::transmute_copy::<&dyn RunnableModule, &X64ExecutionContext>(&&**rm);
 
-            let args = ::std::slice::from_raw_parts(
+            let args = std::slice::from_raw_parts(
                 args,
                 num_params_plus_one.unwrap().as_ptr() as usize - 1,
             );
@@ -1690,7 +1690,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
                 let start_label = a.get_label();
                 // skip the patchpoint during normal execution
                 a.emit_jmp(Condition::None, start_label);
-                // patchpoint of 32 bytes
+                // patchpoint of 32 1-byte nops
                 for _ in 0..32 {
                     a.emit_nop();
                 }
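The comment fix clarifies what the pad actually is: 32 one-byte nops that a later tier can overwrite in place without moving any surrounding bytes. A sketch of the idea on a plain byte buffer (hypothetical helper, not the assembler API used above):

```rust
fn emit_patchpoint(code: &mut Vec<u8>) -> usize {
    let offset = code.len();
    for _ in 0..32 {
        code.push(0x90); // x86 1-byte NOP, so the pad is rewritable at any byte
    }
    offset
}

fn main() {
    let mut code = Vec::new();
    let at = emit_patchpoint(&mut code);
    assert_eq!(&code[at..at + 32], &[0x90u8; 32][..]);
}
```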
@@ -587,7 +587,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
     let start_raw: extern "C" fn(&mut wasmer_runtime_core::vm::Ctx) =
         unsafe { ::std::mem::transmute(start.get_vm_func()) };
 
-    run_tiering(
+    unsafe { run_tiering(
         module.info(),
         &wasm_binary,
         if let Some(ref path) = options.resume {
@@ -612,7 +612,7 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
             })
             .collect(),
         interactive_shell,
-    )?;
+    )? };
 }
 
 #[cfg(not(feature = "managed"))]