Merge branch 'master' into feature/clif-cgapi

Brandon Fish
2019-05-26 12:04:45 -05:00
132 changed files with 11302 additions and 1771 deletions

View File

@@ -1,11 +1,11 @@
[package]
name = "wasmer-llvm-backend"
version = "0.4.1"
version = "0.4.2"
authors = ["Lachlan Sneff <lachlan.sneff@gmail.com>"]
edition = "2018"
[dependencies]
-wasmer-runtime-core = { path = "../runtime-core", version = "0.4.1" }
+wasmer-runtime-core = { path = "../runtime-core", version = "0.4.2" }
inkwell = { git = "https://github.com/wasmerio/inkwell", branch = "llvm7-0" }
wasmparser = "0.29.2"
hashbrown = "0.1.8"

View File

@@ -285,13 +285,16 @@ fn resolve_memory_ptr(
ctx: &mut CtxType,
memarg: &MemoryImmediate,
ptr_ty: PointerType,
value_size: usize,
) -> Result<PointerValue, BinaryReaderError> {
// Ignore alignment hint for the time being.
let imm_offset = intrinsics.i64_ty.const_int(memarg.offset as u64, false);
let value_size_v = intrinsics.i64_ty.const_int(value_size as u64, false);
let var_offset_i32 = state.pop1()?.into_int_value();
let var_offset =
builder.build_int_z_extend(var_offset_i32, intrinsics.i64_ty, &state.var_name());
let effective_offset = builder.build_int_add(var_offset, imm_offset, &state.var_name());
let end_offset = builder.build_int_add(effective_offset, value_size_v, &state.var_name());
let memory_cache = ctx.memory(MemoryIndex::new(0), intrinsics);
let mem_base_int = match memory_cache {
@@ -306,12 +309,20 @@ fn resolve_memory_ptr(
let base_as_int = builder.build_ptr_to_int(base, intrinsics.i64_ty, "base_as_int");
-let base_in_bounds = builder.build_int_compare(
+let base_in_bounds_1 = builder.build_int_compare(
+IntPredicate::ULE,
+end_offset,
+bounds,
+"base_in_bounds_1",
+);
+let base_in_bounds_2 = builder.build_int_compare(
IntPredicate::ULT,
effective_offset,
-bounds,
-"base_in_bounds",
+end_offset,
+"base_in_bounds_2",
);
+let base_in_bounds =
+builder.build_and(base_in_bounds_1, base_in_bounds_2, "base_in_bounds");
let base_in_bounds = builder
.build_call(
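
The hunk above is the core of this change: the single effective_offset < bounds comparison becomes two comparisons combined with build_and, so the generated IR checks both that the end of the access stays inside the memory and that adding value_size did not wrap the offset around. A minimal sketch of the same predicate in plain Rust follows; access_in_bounds is a hypothetical name, and bounds is assumed to be the current memory size in bytes.

// Sketch of the predicate the IR above builds, under the assumptions stated
// in the lead-in; this is not the backend's actual code.
fn access_in_bounds(effective_offset: u64, value_size: u64, bounds: u64) -> bool {
    // Mirrors the build_int_add of effective_offset and value_size_v.
    let end_offset = effective_offset.wrapping_add(value_size);
    // base_in_bounds_1: the whole access [effective_offset, end_offset) fits.
    // base_in_bounds_2: the addition did not wrap past u64::MAX, which would
    // otherwise let the first comparison pass for an out-of-range access.
    end_offset <= bounds && effective_offset < end_offset
}

Under the old single comparison, a 4-byte load starting at the last in-bounds byte would still have been accepted; with end_offset <= bounds it is rejected.
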
@@ -2000,6 +2011,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i32_ptr_ty,
4,
)?;
let result = builder.build_load(effective_address, &state.var_name());
state.push1(result);
@@ -2014,6 +2026,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i64_ptr_ty,
8,
)?;
let result = builder.build_load(effective_address, &state.var_name());
state.push1(result);
@@ -2028,6 +2041,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.f32_ptr_ty,
4,
)?;
let result = builder.build_load(effective_address, &state.var_name());
state.push1(result);
@@ -2042,6 +2056,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.f64_ptr_ty,
8,
)?;
let result = builder.build_load(effective_address, &state.var_name());
state.push1(result);
@@ -2058,6 +2073,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i32_ptr_ty,
4,
)?;
builder.build_store(effective_address, value);
}
@@ -2072,6 +2088,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i64_ptr_ty,
8,
)?;
builder.build_store(effective_address, value);
}
@@ -2086,6 +2103,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.f32_ptr_ty,
4,
)?;
builder.build_store(effective_address, value);
}
@@ -2100,6 +2118,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.f64_ptr_ty,
8,
)?;
builder.build_store(effective_address, value);
}
@@ -2114,6 +2133,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i8_ptr_ty,
1,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2132,6 +2152,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i16_ptr_ty,
2,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2150,6 +2171,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i8_ptr_ty,
1,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2168,6 +2190,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i16_ptr_ty,
2,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2186,6 +2209,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i32_ptr_ty,
4,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2205,6 +2229,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i8_ptr_ty,
1,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2223,6 +2248,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i16_ptr_ty,
2,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2241,6 +2267,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i8_ptr_ty,
1,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2259,6 +2286,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i16_ptr_ty,
2,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2277,6 +2305,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i32_ptr_ty,
4,
)?;
let narrow_result = builder
.build_load(effective_address, &state.var_name())
@@ -2297,6 +2326,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i8_ptr_ty,
1,
)?;
let narrow_value =
builder.build_int_truncate(value, intrinsics.i8_ty, &state.var_name());
@@ -2313,6 +2343,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i16_ptr_ty,
2,
)?;
let narrow_value =
builder.build_int_truncate(value, intrinsics.i16_ty, &state.var_name());
@@ -2329,6 +2360,7 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
&mut ctx,
memarg,
intrinsics.i32_ptr_ty,
4,
)?;
let narrow_value =
builder.build_int_truncate(value, intrinsics.i32_ty, &state.var_name());
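
The remaining hunks in this file simply thread the new value_size argument through every load and store site, pairing each pointer type with its access width: 1 byte for i8, 2 for i16, 4 for i32/f32 and 8 for i64/f64. The standalone sketch below illustrates that pairing and how the width tightens the bounds check; the 64 KiB figure is only an example memory size, and the type names are plain labels rather than inkwell values.

// Illustration only: shows the (pointer type, value_size) pairs passed above
// and the last starting offset each access width leaves in bounds.
fn main() {
    let pairs: [(&str, u64); 4] = [
        ("i8_ptr_ty", 1),
        ("i16_ptr_ty", 2),
        ("i32_ptr_ty / f32_ptr_ty", 4),
        ("i64_ptr_ty / f64_ptr_ty", 8),
    ];
    let bounds: u64 = 65_536; // one 64 KiB wasm page, as an example size
    for &(ty, size) in pairs.iter() {
        // With the new check, end_offset = offset + size must stay <= bounds,
        // so the last valid starting offset shrinks as the access widens.
        println!("{}: last valid starting offset = {}", ty, bounds - size);
    }
}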

View File

@@ -16,6 +16,7 @@ use wasmer_runtime_core::{
GlobalIndex, ImportedFuncIndex, LocalFuncIndex, LocalOrImport, MemoryIndex, SigIndex,
TableIndex, Type,
},
vm::Ctx,
};
fn type_to_llvm_ptr(intrinsics: &Intrinsics, ty: Type) -> PointerType {
@@ -158,6 +159,10 @@ impl Intrinsics {
let imported_func_ty =
context.struct_type(&[i8_ptr_ty_basic, ctx_ptr_ty.as_basic_type_enum()], false);
let sigindex_ty = i32_ty;
let rt_intrinsics_ty = void_ty;
let stack_lower_bound_ty = i8_ty;
let memory_base_ty = i8_ty;
let memory_bound_ty = void_ty;
let local_function_ty = i8_ptr_ty;
let anyfunc_ty = context.struct_type(
@@ -201,6 +206,18 @@ impl Intrinsics {
sigindex_ty
.ptr_type(AddressSpace::Generic)
.as_basic_type_enum(),
rt_intrinsics_ty
.ptr_type(AddressSpace::Generic)
.as_basic_type_enum(),
stack_lower_bound_ty
.ptr_type(AddressSpace::Generic)
.as_basic_type_enum(),
memory_base_ty
.ptr_type(AddressSpace::Generic)
.as_basic_type_enum(),
memory_bound_ty
.ptr_type(AddressSpace::Generic)
.as_basic_type_enum(),
local_function_ty
.ptr_type(AddressSpace::Generic)
.as_basic_type_enum(),
@@ -416,6 +433,10 @@ pub struct CtxType<'a> {
_phantom: PhantomData<&'a FunctionValue>,
}
fn offset_to_index(offset: u8) -> u32 {
(offset as usize / ::std::mem::size_of::<usize>()) as u32
}
impl<'a> CtxType<'a> {
pub fn new(
info: &'a ModuleInfo,
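
The offset_to_index helper added here is what lets the hunks below drop their hard-coded struct indices: the earlier hunk in this file inserts four new pointer-sized fields (rt_intrinsics, stack_lower_bound, memory_base, memory_bound) ahead of the local-functions pointer in the context struct, so a fixed index such as 8 for local_functions would no longer line up. Deriving the index from the byte offsets reported by vm::Ctx::offset_*() keeps the backend in step with the runtime layout, provided the fields being indexed are pointer-sized. A small self-contained illustration follows, assuming a 64-bit target; the literal 64 is just an example offset, not a real Ctx field offset.

// Illustration only, assuming size_of::<usize>() == 8 (a 64-bit target).
// offset_to_index turns a byte offset into vm::Ctx into a field index usable
// with build_struct_gep; this works because each indexed field is a pointer.
fn offset_to_index(offset: u8) -> u32 {
    (offset as usize / ::std::mem::size_of::<usize>()) as u32
}

fn main() {
    // The first field (byte offset 0) keeps GEP index 0, matching the old
    // hard-coded 0 for the local memories array below.
    assert_eq!(offset_to_index(0), 0);
    // A field starting 64 bytes in is the ninth pointer-sized field (index 8).
    assert_eq!(offset_to_index(64), 8);
}
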
@@ -454,14 +475,22 @@ impl<'a> CtxType<'a> {
let (memory_array_ptr_ptr, index, memory_type) = match index.local_or_import(info) {
LocalOrImport::Local(local_mem_index) => (
unsafe {
-cache_builder.build_struct_gep(ctx_ptr_value, 0, "memory_array_ptr_ptr")
+cache_builder.build_struct_gep(
+ctx_ptr_value,
+offset_to_index(Ctx::offset_memories()),
+"memory_array_ptr_ptr",
+)
},
local_mem_index.index() as u64,
info.memories[local_mem_index].memory_type(),
),
LocalOrImport::Import(import_mem_index) => (
unsafe {
-cache_builder.build_struct_gep(ctx_ptr_value, 3, "memory_array_ptr_ptr")
+cache_builder.build_struct_gep(
+ctx_ptr_value,
+offset_to_index(Ctx::offset_imported_memories()),
+"memory_array_ptr_ptr",
+)
},
import_mem_index.index() as u64,
info.imported_memories[import_mem_index].1.memory_type(),
@@ -527,13 +556,21 @@ impl<'a> CtxType<'a> {
let (table_array_ptr_ptr, index) = match index.local_or_import(info) {
LocalOrImport::Local(local_table_index) => (
unsafe {
-cache_builder.build_struct_gep(ctx_ptr_value, 1, "table_array_ptr_ptr")
+cache_builder.build_struct_gep(
+ctx_ptr_value,
+offset_to_index(Ctx::offset_tables()),
+"table_array_ptr_ptr",
+)
},
local_table_index.index() as u64,
),
LocalOrImport::Import(import_table_index) => (
unsafe {
-cache_builder.build_struct_gep(ctx_ptr_value, 4, "table_array_ptr_ptr")
+cache_builder.build_struct_gep(
+ctx_ptr_value,
+offset_to_index(Ctx::offset_imported_tables()),
+"table_array_ptr_ptr",
+)
},
import_table_index.index() as u64,
),
@@ -578,8 +615,13 @@ impl<'a> CtxType<'a> {
intrinsics: &Intrinsics,
builder: &Builder,
) -> PointerValue {
-let local_func_array_ptr_ptr =
-unsafe { builder.build_struct_gep(self.ctx_ptr_value, 8, "local_func_array_ptr_ptr") };
+let local_func_array_ptr_ptr = unsafe {
+builder.build_struct_gep(
+self.ctx_ptr_value,
+offset_to_index(Ctx::offset_local_functions()),
+"local_func_array_ptr_ptr",
+)
+};
let local_func_array_ptr = builder
.build_load(local_func_array_ptr_ptr, "local_func_array_ptr")
.into_pointer_value();
@@ -609,7 +651,11 @@ impl<'a> CtxType<'a> {
*cached_sigindices.entry(index).or_insert_with(|| {
let sigindex_array_ptr_ptr = unsafe {
-cache_builder.build_struct_gep(ctx_ptr_value, 7, "sigindex_array_ptr_ptr")
+cache_builder.build_struct_gep(
+ctx_ptr_value,
+offset_to_index(Ctx::offset_signatures()),
+"sigindex_array_ptr_ptr",
+)
};
let sigindex_array_ptr = cache_builder
.build_load(sigindex_array_ptr_ptr, "sigindex_array_ptr")
@@ -647,7 +693,7 @@ impl<'a> CtxType<'a> {
unsafe {
cache_builder.build_struct_gep(
ctx_ptr_value,
-2,
+offset_to_index(Ctx::offset_globals()),
"globals_array_ptr_ptr",
)
},
@@ -662,7 +708,7 @@ impl<'a> CtxType<'a> {
unsafe {
cache_builder.build_struct_gep(
ctx_ptr_value,
-5,
+offset_to_index(Ctx::offset_imported_globals()),
"globals_array_ptr_ptr",
)
},
@@ -718,7 +764,11 @@ impl<'a> CtxType<'a> {
let imported_func_cache = cached_imported_functions.entry(index).or_insert_with(|| {
let func_array_ptr_ptr = unsafe {
-cache_builder.build_struct_gep(ctx_ptr_value, 6, "imported_func_array_ptr_ptr")
+cache_builder.build_struct_gep(
+ctx_ptr_value,
+offset_to_index(Ctx::offset_imported_funcs()),
+"imported_func_array_ptr_ptr",
+)
};
let func_array_ptr = cache_builder
.build_load(func_array_ptr_ptr, "func_array_ptr")