Swap code lazily when tiering up from singlepass to LLVM.

Does not yet handle long-running functions, but should otherwise work.
losfair
2019-08-09 04:26:17 +08:00
parent 0d604b754b
commit c1619026d5
10 changed files with 350 additions and 94 deletions
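In outline, tiering up means a module begins executing code from the fast singlepass backend while the slower LLVM backend compiles the same functions in the background; once the optimized code is ready, execution is moved over per function instead of stopping the world. As a mental model only (a minimal sketch, not this commit's actual mechanism; CallTable and everything in it is invented for illustration), dispatch can go through a per-function table of entry points that tier-up overwrites atomically:

use std::sync::atomic::{AtomicUsize, Ordering};

// One entry point per local function; calls dispatch through this table.
struct CallTable {
    entries: Vec<AtomicUsize>,
}

impl CallTable {
    // Every slot starts out pointing at the singlepass-compiled code.
    fn new(baseline_entries: &[usize]) -> Self {
        CallTable {
            entries: baseline_entries
                .iter()
                .map(|&p| AtomicUsize::new(p))
                .collect(),
        }
    }

    // Read on every dispatch; a single atomic load.
    fn entry(&self, local_func_id: usize) -> usize {
        self.entries[local_func_id].load(Ordering::Acquire)
    }

    // Called once the optimizing backend delivers code for one function.
    fn swap_in(&self, local_func_id: usize, optimized_entry: usize) {
        self.entries[local_func_id].store(optimized_entry, Ordering::Release);
    }
}

A scheme like this also explains the caveat above: a function that never returns to a dispatch point keeps running the old singlepass code, which is why long-running functions are not handled yet.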


@@ -239,6 +239,7 @@ pub struct LLVMBackend {
    #[allow(dead_code)]
    buffer: Arc<Buffer>,
    msm: Option<ModuleStateMap>,
    local_func_id_to_offset: Vec<usize>,
}

impl LLVMBackend {
@@ -380,6 +381,17 @@ impl LLVMBackend {
            }
        }

        let code_ptr = unsafe { llvm_backend_get_code_ptr(module) } as usize;
        let code_len = unsafe { llvm_backend_get_code_size(module) } as usize;

        // Record each function's offset into the code buffer rather than its
        // absolute address.
        let local_func_id_to_offset: Vec<usize> = local_func_id_to_addr
            .iter()
            .map(|&x| {
                // Every function address must fall within the emitted code region.
                assert!(x >= code_ptr && x < code_ptr + code_len);
                x - code_ptr
            })
            .collect();

        //println!("MSM: {:?}", msm);

        (
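The backend reports absolute function addresses, but only offsets relative to code_ptr are stored: offsets remain valid if the code buffer is serialized to a cache and mapped at a different base address later, while absolute addresses would not. Recovering a callable address is a single addition over the current base, as in this hedged sketch (func_addr is an illustrative helper, not a function in this codebase):

// Recover an absolute address from a stored offset, given wherever the
// code buffer happens to be mapped right now.
fn func_addr(code_ptr: usize, offsets: &[usize], local_func_id: usize) -> Option<usize> {
    offsets.get(local_func_id).map(|&off| code_ptr + off)
}

fn main() {
    let code_ptr = 0x7f00_0000_usize; // pretend base of the mapped buffer
    let offsets = vec![0x0, 0x40, 0x180];
    assert_eq!(func_addr(code_ptr, &offsets, 2), Some(0x7f00_0180));
    assert_eq!(func_addr(code_ptr, &offsets, 3), None); // unknown function id
}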
@@ -387,6 +399,7 @@ impl LLVMBackend {
                module,
                buffer: Arc::clone(&buffer),
                msm: Some(msm),
                local_func_id_to_offset,
            },
            LLVMCache { buffer },
        )
@@ -397,6 +410,7 @@ impl LLVMBackend {
                module,
                buffer: Arc::clone(&buffer),
                msm: None,
                local_func_id_to_offset: vec![],
            },
            LLVMCache { buffer },
        )
@@ -428,6 +442,7 @@ impl LLVMBackend {
                module,
                buffer: Arc::clone(&buffer),
                msm: None,
                local_func_id_to_offset: vec![],
            },
            LLVMCache { buffer },
        ))
@@ -491,6 +506,10 @@ impl RunnableModule for LLVMBackend {
        })
    }

    fn get_local_function_offsets(&self) -> Option<Vec<usize>> {
        Some(self.local_func_id_to_offset.clone())
    }

    fn get_module_state_map(&self) -> Option<ModuleStateMap> {
        self.msm.clone()
    }
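get_local_function_offsets is what lets tier-up code locate each optimized function inside the LLVM backend's code buffer; returning an Option presumably leaves room for backends that cannot report offsets at all. A sketch of a consumer, under stated assumptions: Backend below is a stand-in for just this slice of RunnableModule, and base_ptr is an assumed way to obtain the mapped buffer's base address, not a real method on the trait:

// Stand-in trait: only the pieces of RunnableModule this sketch needs.
trait Backend {
    fn get_local_function_offsets(&self) -> Option<Vec<usize>>;
    // Assumption for illustration: base address of the mapped code buffer.
    fn base_ptr(&self) -> usize;
}

// Pair each local function id with the absolute address of its optimized
// code, ready to be swapped in for the singlepass version.
fn plan_swaps(optimized: &dyn Backend) -> Vec<(usize, usize)> {
    let base = optimized.base_ptr();
    optimized
        .get_local_function_offsets()
        .unwrap_or_default()
        .into_iter()
        .enumerate()
        .map(|(local_func_id, off)| (local_func_id, base + off))
        .collect()
}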