Don't emit bounds checks when the offset is less than the minimum memory size.

Author: Nick Lewycky
Date:   2019-10-28 15:53:26 -07:00
Parent: d46e5d499c
Commit: 9224db6d1e
3 changed files with 170 additions and 125 deletions
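
In brief, the reasoning behind the change, sketched in plain Rust rather than wasmer's LLVM builder code (every name below is invented for illustration): a WebAssembly memory never shrinks below its declared minimum, so when the effective offset of a load or store is a compile-time constant and offset plus access width fits within that minimum, the access can never be out of bounds and the runtime check can be dropped.

const WASM_PAGE_BYTES: u64 = 64 * 1024; // wasm pages are 64 KiB

/// Returns true when a runtime bounds check must still be emitted.
fn needs_runtime_check(const_offset: Option<u64>, value_size: u64, minimum_pages: u64) -> bool {
    match const_offset {
        // Constant offset: statically safe iff the whole access ends at or
        // before the minimum memory size (guarding against overflow).
        Some(offset) => match offset.checked_add(value_size) {
            Some(end) => end > minimum_pages * WASM_PAGE_BYTES,
            None => true, // offset + size overflows u64: never in bounds
        },
        // Offset known only at run time: compare against the live bound.
        None => true,
    }
}

fn main() {
    // With a one-page (64 KiB) minimum, a 4-byte load at offset 8 is
    // always safe, but the same load at offset 65533 crosses the line.
    assert!(!needs_runtime_check(Some(8), 4, 1));
    assert!(needs_runtime_check(Some(65_533), 4, 1));
    assert!(needs_runtime_check(None, 4, 1));
}

The hunks below perform the same test over LLVM constants, so the comparison can fold away before any check instructions are emitted.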

View File

@@ -561,18 +561,25 @@ fn resolve_memory_ptr(
 ) -> Result<PointerValue, BinaryReaderError> {
     // Look up the memory base (as pointer) and bounds (as unsigned integer).
     let memory_cache = ctx.memory(MemoryIndex::new(0), intrinsics);
-    let (mem_base, mem_bound) = match memory_cache {
+    let (mem_base, mem_bound, minimum, _maximum) = match memory_cache {
         MemoryCache::Dynamic {
             ptr_to_base_ptr,
             ptr_to_bounds,
+            minimum,
+            maximum,
         } => {
             let base = builder
                 .build_load(ptr_to_base_ptr, "base")
                 .into_pointer_value();
             let bounds = builder.build_load(ptr_to_bounds, "bounds").into_int_value();
-            (base, bounds)
+            (base, bounds, minimum, maximum)
         }
-        MemoryCache::Static { base_ptr, bounds } => (base_ptr, bounds),
+        MemoryCache::Static {
+            base_ptr,
+            bounds,
+            minimum,
+            maximum,
+        } => (base_ptr, bounds, minimum, maximum),
     };
     let mem_base = builder
         .build_bitcast(mem_base, intrinsics.i8_ptr_ty, &state.var_name())
@@ -587,48 +594,72 @@ fn resolve_memory_ptr(
     if let MemoryCache::Dynamic { .. } = memory_cache {
         // If the memory is dynamic, do a bounds check. For static we rely on
-        // the size being a multiple of the page size and hitting a reserved
-        // but unreadable memory.
+        // the size being a multiple of the page size and hitting a guard page.
         let value_size_v = intrinsics.i64_ty.const_int(value_size as u64, false);
-        let load_offset_end =
-            builder.build_int_add(effective_offset, value_size_v, &state.var_name());
-        let ptr_in_bounds = builder.build_int_compare(
-            IntPredicate::ULE,
-            load_offset_end,
-            mem_bound,
-            &state.var_name(),
-        );
-        let ptr_in_bounds = builder
-            .build_call(
-                intrinsics.expect_i1,
-                &[
-                    ptr_in_bounds.as_basic_value_enum(),
-                    intrinsics.i1_ty.const_int(1, false).as_basic_value_enum(),
-                ],
-                "ptr_in_bounds_expect",
-            )
-            .try_as_basic_value()
-            .left()
-            .unwrap()
-            .into_int_value();
-        let in_bounds_continue_block =
-            context.append_basic_block(function, "in_bounds_continue_block");
-        let not_in_bounds_block = context.append_basic_block(function, "not_in_bounds_block");
-        builder.build_conditional_branch(
-            ptr_in_bounds,
-            &in_bounds_continue_block,
-            &not_in_bounds_block,
-        );
-        builder.position_at_end(&not_in_bounds_block);
-        builder.build_call(
-            intrinsics.throw_trap,
-            &[intrinsics.trap_memory_oob],
-            "throw",
-        );
-        builder.build_unreachable();
-        builder.position_at_end(&in_bounds_continue_block);
+        let ptr_in_bounds = if effective_offset.is_const() {
+            let load_offset_end = effective_offset.const_add(value_size_v);
+            let ptr_in_bounds = load_offset_end.const_int_compare(
+                IntPredicate::ULE,
+                intrinsics.i64_ty.const_int(minimum.bytes().0 as u64, false),
+            );
+            if ptr_in_bounds.is_constant_int() {
+                Some(ptr_in_bounds)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+        .unwrap_or_else(|| {
+            let load_offset_end =
+                builder.build_int_add(effective_offset, value_size_v, &state.var_name());
+            builder.build_int_compare(
+                IntPredicate::ULE,
+                load_offset_end,
+                mem_bound,
+                &state.var_name(),
+            )
+        });
+        if !ptr_in_bounds.is_constant_int()
+            || ptr_in_bounds.get_zero_extended_constant().unwrap() != 1
+        {
+            // LLVM may have folded this into 'i1 true' in which case we know
+            // the pointer is in bounds. LLVM may also have folded it into a
+            // constant expression, not known to be either true or false yet.
+            // If it's false, unknown-but-constant, or not-a-constant, emit a
+            // runtime bounds check. LLVM may yet succeed at optimizing it away.
+            let ptr_in_bounds = builder
+                .build_call(
+                    intrinsics.expect_i1,
+                    &[
+                        ptr_in_bounds.as_basic_value_enum(),
+                        intrinsics.i1_ty.const_int(1, false).as_basic_value_enum(),
+                    ],
+                    "ptr_in_bounds_expect",
+                )
+                .try_as_basic_value()
+                .left()
+                .unwrap()
+                .into_int_value();
+            let in_bounds_continue_block =
+                context.append_basic_block(function, "in_bounds_continue_block");
+            let not_in_bounds_block = context.append_basic_block(function, "not_in_bounds_block");
+            builder.build_conditional_branch(
+                ptr_in_bounds,
+                &in_bounds_continue_block,
+                &not_in_bounds_block,
+            );
+            builder.position_at_end(&not_in_bounds_block);
+            builder.build_call(
+                intrinsics.throw_trap,
+                &[intrinsics.trap_memory_oob],
+                "throw",
+            );
+            builder.build_unreachable();
+            builder.position_at_end(&in_bounds_continue_block);
+        }
     }
     let ptr = unsafe { builder.build_gep(mem_base, &[effective_offset], &state.var_name()) };
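
A detail worth noting in the hunk above: a comparison that folds to constant false does not reject the module at compile time. As the new comment spells out, false, unknown-but-constant, and not-a-constant all keep the expect-and-branch sequence, because an out-of-bounds access must trap when the instruction executes, not fail compilation. A plain-Rust model of that three-way decision (the enum and function are invented here, not wasmer or inkwell API):

enum FoldedCompare {
    KnownTrue,  // folded to 'i1 true': provably in bounds
    KnownFalse, // folded to 'i1 false'
    Unknown,    // constant expression or runtime value
}

/// Whether the expect + conditional-branch + trap sequence is still emitted.
fn must_emit_check(folded: &FoldedCompare) -> bool {
    match folded {
        // Only a provably-true comparison lets codegen skip the check.
        FoldedCompare::KnownTrue => false,
        // False or unknown: keep the branch so an out-of-bounds access
        // traps at execution time; LLVM may still simplify it later.
        FoldedCompare::KnownFalse | FoldedCompare::Unknown => true,
    }
}

fn main() {
    assert!(!must_emit_check(&FoldedCompare::KnownTrue));
    assert!(must_emit_check(&FoldedCompare::KnownFalse));
    assert!(must_emit_check(&FoldedCompare::Unknown));
}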

View File

@@ -21,6 +21,7 @@ use wasmer_runtime_core::{
         GlobalIndex, ImportedFuncIndex, LocalFuncIndex, LocalOrImport, MemoryIndex, SigIndex,
         TableIndex, Type,
     },
+    units::Pages,
     vm::{Ctx, INTERNALS_SIZE},
 };
@@ -559,11 +560,15 @@ pub enum MemoryCache {
     Dynamic {
         ptr_to_base_ptr: PointerValue,
         ptr_to_bounds: PointerValue,
+        minimum: Pages,
+        maximum: Option<Pages>,
     },
     /// The memory is always in the same place.
     Static {
         base_ptr: PointerValue,
         bounds: IntValue,
+        minimum: Pages,
+        maximum: Option<Pages>,
     },
 }
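
Note that resolve_memory_ptr in the first file binds the upper limit as _maximum and leaves it unused; only minimum feeds the new fold. The dual fold that maximum would allow, sketched here as a hypothetical helper that is not part of this commit: a memory can never grow past its declared maximum, so an access that ends beyond it is provably out of bounds.

const WASM_PAGE_BYTES: u64 = 64 * 1024;

/// Hypothetical, for illustration only: true when the access can never
/// succeed, because memory.grow can never exceed the declared maximum.
fn provably_out_of_bounds(offset_end: u64, maximum_pages: Option<u64>) -> bool {
    match maximum_pages {
        Some(max) => offset_end > max * WASM_PAGE_BYTES,
        None => false, // no declared maximum: the memory may keep growing
    }
}

fn main() {
    // With a 10-page (640 KiB) maximum, an access ending at byte 700000
    // can never be in bounds and could compile straight to a trap.
    assert!(provably_out_of_bounds(700_000, Some(10)));
    assert!(!provably_out_of_bounds(700_000, None));
}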
@@ -662,30 +667,35 @@ impl<'a> CtxType<'a> {
         );
         *cached_memories.entry(index).or_insert_with(|| {
-            let (memory_array_ptr_ptr, index, memory_type) = match index.local_or_import(info) {
-                LocalOrImport::Local(local_mem_index) => (
-                    unsafe {
-                        cache_builder.build_struct_gep(
-                            ctx_ptr_value,
-                            offset_to_index(Ctx::offset_memories()),
-                            "memory_array_ptr_ptr",
-                        )
-                    },
-                    local_mem_index.index() as u64,
-                    info.memories[local_mem_index].memory_type(),
-                ),
-                LocalOrImport::Import(import_mem_index) => (
-                    unsafe {
-                        cache_builder.build_struct_gep(
-                            ctx_ptr_value,
-                            offset_to_index(Ctx::offset_imported_memories()),
-                            "memory_array_ptr_ptr",
-                        )
-                    },
-                    import_mem_index.index() as u64,
-                    info.imported_memories[import_mem_index].1.memory_type(),
-                ),
-            };
+            let (memory_array_ptr_ptr, index, memory_type, minimum, maximum) =
+                match index.local_or_import(info) {
+                    LocalOrImport::Local(local_mem_index) => (
+                        unsafe {
+                            cache_builder.build_struct_gep(
+                                ctx_ptr_value,
+                                offset_to_index(Ctx::offset_memories()),
+                                "memory_array_ptr_ptr",
+                            )
+                        },
+                        local_mem_index.index() as u64,
+                        info.memories[local_mem_index].memory_type(),
+                        info.memories[local_mem_index].minimum,
+                        info.memories[local_mem_index].maximum,
+                    ),
+                    LocalOrImport::Import(import_mem_index) => (
+                        unsafe {
+                            cache_builder.build_struct_gep(
+                                ctx_ptr_value,
+                                offset_to_index(Ctx::offset_imported_memories()),
+                                "memory_array_ptr_ptr",
+                            )
+                        },
+                        import_mem_index.index() as u64,
+                        info.imported_memories[import_mem_index].1.memory_type(),
+                        info.imported_memories[import_mem_index].1.minimum,
+                        info.imported_memories[import_mem_index].1.maximum,
+                    ),
+                };
             let memory_array_ptr = cache_builder
                 .build_load(memory_array_ptr_ptr, "memory_array_ptr")
@@ -713,6 +723,8 @@ impl<'a> CtxType<'a> {
                 MemoryType::Dynamic => MemoryCache::Dynamic {
                     ptr_to_base_ptr,
                     ptr_to_bounds,
+                    minimum,
+                    maximum,
                 },
                 MemoryType::Static | MemoryType::SharedStatic => MemoryCache::Static {
                     base_ptr: cache_builder
@@ -721,6 +733,8 @@ impl<'a> CtxType<'a> {
                     bounds: cache_builder
                         .build_load(ptr_to_bounds, "bounds")
                         .into_int_value(),
+                    minimum,
+                    maximum,
                 },
             }
         })
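
On the units used above: minimum.bytes().0 in the first file converts a page count into bytes at the WebAssembly page size of 64 KiB. A simplified stand-in for the wrappers in wasmer_runtime_core::units (the real field widths may differ):

const WASM_PAGE_SIZE: u64 = 65_536;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Pages(pub u64);

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Bytes(pub u64);

impl Pages {
    /// Convert a page count to a byte count.
    pub fn bytes(self) -> Bytes {
        Bytes(self.0 * WASM_PAGE_SIZE)
    }
}

fn main() {
    // A memory declared as `(memory 2 10)` has minimum Pages(2) and
    // maximum Some(Pages(10)); its guaranteed size is 128 KiB.
    assert_eq!(Pages(2).bytes(), Bytes(131_072));
}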