mirror of
https://github.com/fluencelabs/wasmer
synced 2025-06-23 21:51:32 +00:00
Merge branch 'master' into features/llvm-windows
This commit is contained in:
@ -115,6 +115,8 @@ jobs:
|
||||
name: Check
|
||||
command: |
|
||||
make check
|
||||
make compile-bench-singlepass
|
||||
# TODO: add compile-bench-llvm and compile-bench-clif when they work
|
||||
- run:
|
||||
name: Release
|
||||
command: make release-fast
|
||||
|
@ -6,6 +6,7 @@ Blocks of changes will separated by version increments.
|
||||
|
||||
## **[Unreleased]**
|
||||
- [#598](https://github.com/wasmerio/wasmer/pull/598) LLVM Backend is now supported in Windows
|
||||
- [#599](https://github.com/wasmerio/wasmer/pull/599) Fix llvm backend failures in fat spec tests and simd_binaryen spec test.
|
||||
- [#579](https://github.com/wasmerio/wasmer/pull/579) Fix bug in caching with LLVM and Singlepass backends.
|
||||
Add `default-backend-singlepass`, `default-backend-llvm`, and `default-backend-cranelift` features to `wasmer-runtime`
|
||||
to control the `default_compiler()` function (this is a breaking change). Add `compiler_for_backend` function in `wasmer-runtime`
|
||||
|
20
Cargo.toml
20
Cargo.toml
@ -73,9 +73,23 @@ trace = ["wasmer-runtime-core/trace"]
|
||||
extra-debug = ["wasmer-clif-backend/debug", "wasmer-runtime-core/debug"]
|
||||
# This feature will allow cargo test to run much faster
|
||||
fast-tests = []
|
||||
backend-cranelift = ["wasmer-runtime-core/backend-cranelift", "wasmer-runtime/cranelift"]
|
||||
backend-llvm = ["wasmer-llvm-backend", "wasmer-runtime-core/backend-llvm", "wasmer-runtime/llvm"]
|
||||
backend-singlepass = ["wasmer-singlepass-backend", "wasmer-runtime-core/backend-singlepass", "wasmer-runtime/singlepass"]
|
||||
backend-cranelift = [
|
||||
"wasmer-runtime-core/backend-cranelift",
|
||||
"wasmer-runtime/cranelift",
|
||||
"wasmer-middleware-common/clif"
|
||||
]
|
||||
backend-llvm = [
|
||||
"wasmer-llvm-backend",
|
||||
"wasmer-runtime-core/backend-llvm",
|
||||
"wasmer-runtime/llvm",
|
||||
"wasmer-middleware-common/llvm"
|
||||
]
|
||||
backend-singlepass = [
|
||||
"wasmer-singlepass-backend",
|
||||
"wasmer-runtime-core/backend-singlepass",
|
||||
"wasmer-runtime/singlepass",
|
||||
"wasmer-middleware-common/singlepass"
|
||||
]
|
||||
wasi = ["wasmer-wasi"]
|
||||
# vfs = ["wasmer-runtime-abi"]
|
||||
|
||||
|
16
Makefile
16
Makefile
@ -141,8 +141,20 @@ release-singlepass:
|
||||
release-llvm:
|
||||
cargo build --release --features backend-llvm
|
||||
|
||||
bench:
|
||||
cargo bench --all
|
||||
bench-singlepass:
|
||||
cargo bench --all --no-default-features --features "backend-singlepass"
|
||||
bench-clif:
|
||||
cargo bench --all --no-default-features --features "backend-clif"
|
||||
bench-llvm:
|
||||
cargo bench --all --no-default-features --features "backend-llvm"
|
||||
|
||||
# compile but don't run the benchmarks
|
||||
compile-bench-singlepass:
|
||||
cargo bench --all --no-run --no-default-features --features "backend-singlepass"
|
||||
compile-bench-clif:
|
||||
cargo bench --all --no-run --no-default-features --features "backend-clif"
|
||||
compile-bench-llvm:
|
||||
cargo bench --all --no-run --no-default-features --features "backend-llvm"
|
||||
|
||||
|
||||
# Build utils
|
||||
|
@ -220,7 +220,10 @@ Each integration can be tested separately:
|
||||
Benchmarks can be run with:
|
||||
|
||||
```sh
|
||||
make bench
|
||||
make bench-[backend]
|
||||
|
||||
# for example
|
||||
make bench-singlepass
|
||||
```
|
||||
|
||||
## Roadmap
|
||||
|
@ -209,14 +209,23 @@ fn trap_if_not_representable_as_int(
|
||||
intrinsics: &Intrinsics,
|
||||
context: &Context,
|
||||
function: &FunctionValue,
|
||||
lower_bound: f64,
|
||||
upper_bound: f64,
|
||||
lower_bound: u64, // Inclusive (not a trapping value)
|
||||
upper_bound: u64, // Inclusive (not a trapping value)
|
||||
value: FloatValue,
|
||||
) {
|
||||
let float_ty = value.get_type();
|
||||
let int_ty = if float_ty == intrinsics.f32_ty {
|
||||
intrinsics.i32_ty
|
||||
} else {
|
||||
intrinsics.i64_ty
|
||||
};
|
||||
|
||||
let lower_bound = float_ty.const_float(lower_bound);
|
||||
let upper_bound = float_ty.const_float(upper_bound);
|
||||
let lower_bound = builder
|
||||
.build_bitcast(int_ty.const_int(lower_bound, false), float_ty, "")
|
||||
.into_float_value();
|
||||
let upper_bound = builder
|
||||
.build_bitcast(int_ty.const_int(upper_bound, false), float_ty, "")
|
||||
.into_float_value();
|
||||
|
||||
// The 'U' in the float predicate is short for "unordered" which means that
|
||||
// the comparison will compare true if either operand is a NaN. Thus, NaNs
|
||||
@ -351,6 +360,45 @@ fn trap_if_zero(
|
||||
builder.position_at_end(&shouldnt_trap_block);
|
||||
}
|
||||
|
||||
// Replaces any NaN with the canonical QNaN, otherwise leaves the value alone.
|
||||
fn canonicalize_nans(
|
||||
builder: &Builder,
|
||||
intrinsics: &Intrinsics,
|
||||
value: BasicValueEnum,
|
||||
) -> BasicValueEnum {
|
||||
let f_ty = value.get_type();
|
||||
let canonicalized = if f_ty.is_vector_type() {
|
||||
let value = value.into_vector_value();
|
||||
let f_ty = f_ty.into_vector_type();
|
||||
let zero = f_ty.const_zero();
|
||||
let nan_cmp = builder.build_float_compare(FloatPredicate::UNO, value, zero, "nan");
|
||||
let canonical_qnan = f_ty
|
||||
.get_element_type()
|
||||
.into_float_type()
|
||||
.const_float(std::f64::NAN);
|
||||
let canonical_qnan = splat_vector(
|
||||
builder,
|
||||
intrinsics,
|
||||
canonical_qnan.as_basic_value_enum(),
|
||||
f_ty,
|
||||
"",
|
||||
);
|
||||
builder
|
||||
.build_select(nan_cmp, canonical_qnan, value, "")
|
||||
.as_basic_value_enum()
|
||||
} else {
|
||||
let value = value.into_float_value();
|
||||
let f_ty = f_ty.into_float_type();
|
||||
let zero = f_ty.const_zero();
|
||||
let nan_cmp = builder.build_float_compare(FloatPredicate::UNO, value, zero, "nan");
|
||||
let canonical_qnan = f_ty.const_float(std::f64::NAN);
|
||||
builder
|
||||
.build_select(nan_cmp, canonical_qnan, value, "")
|
||||
.as_basic_value_enum()
|
||||
};
|
||||
canonicalized
|
||||
}
|
||||
|
||||
fn resolve_memory_ptr(
|
||||
builder: &Builder,
|
||||
intrinsics: &Intrinsics,
|
||||
@ -1577,9 +1625,45 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
Operator::I32RemS | Operator::I64RemS => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let (v1, v2) = (v1.into_int_value(), v2.into_int_value());
|
||||
let int_type = v1.get_type();
|
||||
let (min_value, neg_one_value) = if int_type == intrinsics.i32_ty {
|
||||
let min_value = int_type.const_int(i32::min_value() as u64, false);
|
||||
let neg_one_value = int_type.const_int(-1i32 as u32 as u64, false);
|
||||
(min_value, neg_one_value)
|
||||
} else if int_type == intrinsics.i64_ty {
|
||||
let min_value = int_type.const_int(i64::min_value() as u64, false);
|
||||
let neg_one_value = int_type.const_int(-1i64 as u64, false);
|
||||
(min_value, neg_one_value)
|
||||
} else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
trap_if_zero(builder, intrinsics, context, &function, v2);
|
||||
|
||||
// "Overflow also leads to undefined behavior; this is a rare
|
||||
// case, but can occur, for example, by taking the remainder of
|
||||
// a 32-bit division of -2147483648 by -1. (The remainder
|
||||
// doesn’t actually overflow, but this rule lets srem be
|
||||
// implemented using instructions that return both the result
|
||||
// of the division and the remainder.)"
|
||||
// -- https://llvm.org/docs/LangRef.html#srem-instruction
|
||||
//
|
||||
// In Wasm, the i32.rem_s i32.const -2147483648 i32.const -1 is
|
||||
// i32.const 0. We implement this by swapping out the left value
|
||||
// for 0 in this case.
|
||||
let will_overflow = builder.build_and(
|
||||
builder.build_int_compare(IntPredicate::EQ, v1, min_value, "left_is_min"),
|
||||
builder.build_int_compare(
|
||||
IntPredicate::EQ,
|
||||
v2,
|
||||
neg_one_value,
|
||||
"right_is_neg_one",
|
||||
),
|
||||
"srem_will_overflow",
|
||||
);
|
||||
let v1 = builder
|
||||
.build_select(will_overflow, int_type.const_zero(), v1, "")
|
||||
.into_int_value();
|
||||
let res = builder.build_int_signed_rem(v1, v2, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
@ -2033,120 +2117,120 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
***************************/
|
||||
Operator::F32Add | Operator::F64Add => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_float_value(), v2.into_float_value());
|
||||
let res = builder.build_float_add(v1, v2, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32x4Add => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f32x4_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f32x4_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_add(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F64x2Add => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f64x2_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f64x2_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_add(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32Sub | Operator::F64Sub => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_float_value(), v2.into_float_value());
|
||||
let res = builder.build_float_sub(v1, v2, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32x4Sub => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f32x4_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f32x4_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_sub(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F64x2Sub => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f64x2_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f64x2_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_sub(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32Mul | Operator::F64Mul => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_float_value(), v2.into_float_value());
|
||||
let res = builder.build_float_mul(v1, v2, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32x4Mul => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f32x4_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f32x4_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_mul(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F64x2Mul => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f64x2_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f64x2_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_mul(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32Div | Operator::F64Div => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_float_value(), v2.into_float_value());
|
||||
let res = builder.build_float_div(v1, v2, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32x4Div => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f32x4_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f32x4_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f32x4_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_div(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F64x2Div => {
|
||||
let (v1, v2) = state.pop2()?;
|
||||
let v1 = builder
|
||||
.build_bitcast(v1, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v2 = builder
|
||||
.build_bitcast(v2, intrinsics.f64x2_ty, "")
|
||||
.into_vector_value();
|
||||
let v1 = builder.build_bitcast(v1, intrinsics.f64x2_ty, "");
|
||||
let v2 = builder.build_bitcast(v2, intrinsics.f64x2_ty, "");
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1);
|
||||
let v2 = canonicalize_nans(builder, intrinsics, v2);
|
||||
let (v1, v2) = (v1.into_vector_value(), v2.into_vector_value());
|
||||
let res = builder.build_float_div(v1, v2, &state.var_name());
|
||||
let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
|
||||
state.push1(res);
|
||||
@ -3189,12 +3273,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
Operator::I32TruncSF32 => {
|
||||
let v1 = state.pop1()?.into_float_value();
|
||||
trap_if_not_representable_as_int(
|
||||
builder,
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-2147483904.0,
|
||||
2147483648.0,
|
||||
builder, intrinsics, context, &function, 0xcf000000, // -2147483600.0
|
||||
0x4effffff, // 2147483500.0
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3208,8 +3288,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-2147483649.0,
|
||||
2147483648.0,
|
||||
0xc1e00000001fffff, // -2147483648.9999995
|
||||
0x41dfffffffffffff, // 2147483647.9999998
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3225,12 +3305,9 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
Operator::I64TruncSF32 => {
|
||||
let v1 = state.pop1()?.into_float_value();
|
||||
trap_if_not_representable_as_int(
|
||||
builder,
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-9223373136366403584.0,
|
||||
9223372036854775808.0,
|
||||
builder, intrinsics, context, &function,
|
||||
0xdf000000, // -9223372000000000000.0
|
||||
0x5effffff, // 9223371500000000000.0
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3244,8 +3321,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-9223372036854777856.0,
|
||||
9223372036854775808.0,
|
||||
0xc3e0000000000000, // -9223372036854776000.0
|
||||
0x43dfffffffffffff, // 9223372036854775000.0
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3261,12 +3338,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
Operator::I32TruncUF32 => {
|
||||
let v1 = state.pop1()?.into_float_value();
|
||||
trap_if_not_representable_as_int(
|
||||
builder,
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-1.0,
|
||||
4294967296.0,
|
||||
builder, intrinsics, context, &function, 0xbf7fffff, // -0.99999994
|
||||
0x4f7fffff, // 4294967000.0
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3280,8 +3353,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-1.0,
|
||||
4294967296.0,
|
||||
0xbfefffffffffffff, // -0.9999999999999999
|
||||
0x41efffffffffffff, // 4294967295.9999995
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3297,12 +3370,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
Operator::I64TruncUF32 => {
|
||||
let v1 = state.pop1()?.into_float_value();
|
||||
trap_if_not_representable_as_int(
|
||||
builder,
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-1.0,
|
||||
18446744073709551616.0,
|
||||
builder, intrinsics, context, &function, 0xbf7fffff, // -0.99999994
|
||||
0x5f7fffff, // 18446743000000000000.0
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3316,8 +3385,8 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
intrinsics,
|
||||
context,
|
||||
&function,
|
||||
-1.0,
|
||||
18446744073709551616.0,
|
||||
0xbfefffffffffffff, // -0.9999999999999999
|
||||
0x43efffffffffffff, // 18446744073709550000.0
|
||||
v1,
|
||||
);
|
||||
let res =
|
||||
@ -3331,12 +3400,14 @@ impl FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator {
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F32DemoteF64 => {
|
||||
let v1 = state.pop1()?.into_float_value();
|
||||
let v1 = state.pop1()?;
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1).into_float_value();
|
||||
let res = builder.build_float_trunc(v1, intrinsics.f32_ty, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
Operator::F64PromoteF32 => {
|
||||
let v1 = state.pop1()?.into_float_value();
|
||||
let v1 = state.pop1()?;
|
||||
let v1 = canonicalize_nans(builder, intrinsics, v1).into_float_value();
|
||||
let res = builder.build_float_ext(v1, intrinsics.f64_ty, &state.var_name());
|
||||
state.push1(res);
|
||||
}
|
||||
|
@ -328,12 +328,12 @@ impl Intrinsics {
|
||||
minimum_f32: module.add_function("llvm.minnum.f32", ret_f32_take_f32_f32, None),
|
||||
minimum_f64: module.add_function("llvm.minnum.f64", ret_f64_take_f64_f64, None),
|
||||
minimum_f32x4: module.add_function(
|
||||
"llvm.minimum.v4f32",
|
||||
"llvm.minnum.v4f32",
|
||||
ret_f32x4_take_f32x4_f32x4,
|
||||
None,
|
||||
),
|
||||
minimum_f64x2: module.add_function(
|
||||
"llvm.minimum.v2f64",
|
||||
"llvm.minnum.v2f64",
|
||||
ret_f64x2_take_f64x2_f64x2,
|
||||
None,
|
||||
),
|
||||
@ -341,12 +341,12 @@ impl Intrinsics {
|
||||
maximum_f32: module.add_function("llvm.maxnum.f32", ret_f32_take_f32_f32, None),
|
||||
maximum_f64: module.add_function("llvm.maxnum.f64", ret_f64_take_f64_f64, None),
|
||||
maximum_f32x4: module.add_function(
|
||||
"llvm.maximum.v4f32",
|
||||
"llvm.maxnum.v4f32",
|
||||
ret_f32x4_take_f32x4_f32x4,
|
||||
None,
|
||||
),
|
||||
maximum_f64x2: module.add_function(
|
||||
"llvm.maximum.v2f64",
|
||||
"llvm.maxnum.v2f64",
|
||||
ret_f64x2_take_f64x2_f64x2,
|
||||
None,
|
||||
),
|
||||
|
@ -164,15 +164,11 @@ fn get_compiler(limit: u64, metering: bool) -> impl Compiler {
|
||||
}
|
||||
|
||||
#[cfg(not(any(feature = "llvm", feature = "clif", feature = "singlepass")))]
|
||||
fn get_compiler(_limit: u64, metering: bool) -> impl Compiler {
|
||||
panic!("compiler not specified, activate a compiler via features");
|
||||
use wasmer_clif_backend::CraneliftCompiler;
|
||||
CraneliftCompiler::new()
|
||||
}
|
||||
compile_error!("compiler not specified, activate a compiler via features");
|
||||
|
||||
#[cfg(feature = "clif")]
|
||||
fn get_compiler(_limit: u64, metering: bool) -> impl Compiler {
|
||||
panic!("cranelift does not implement metering");
|
||||
compile_error!("cranelift does not implement metering");
|
||||
use wasmer_clif_backend::CraneliftCompiler;
|
||||
CraneliftCompiler::new()
|
||||
}
|
||||
|
@ -162,15 +162,11 @@ mod tests {
|
||||
}
|
||||
|
||||
#[cfg(not(any(feature = "llvm", feature = "clif", feature = "singlepass")))]
|
||||
fn get_compiler(_limit: u64) -> impl Compiler {
|
||||
panic!("compiler not specified, activate a compiler via features");
|
||||
use wasmer_clif_backend::CraneliftCompiler;
|
||||
CraneliftCompiler::new()
|
||||
}
|
||||
compile_error!("compiler not specified, activate a compiler via features");
|
||||
|
||||
#[cfg(feature = "clif")]
|
||||
fn get_compiler(_limit: u64) -> impl Compiler {
|
||||
panic!("cranelift does not implement metering");
|
||||
compile_error!("cranelift does not implement metering");
|
||||
use wasmer_clif_backend::CraneliftCompiler;
|
||||
CraneliftCompiler::new()
|
||||
}
|
||||
|
34
lib/spectests/spectests/simd_binaryen.wast
vendored
34
lib/spectests/spectests/simd_binaryen.wast
vendored
@ -4,7 +4,9 @@
|
||||
;; Distributed under the Apache License
|
||||
;; https://github.com/WebAssembly/binaryen/blob/master/test/spec/LICENSE
|
||||
;;
|
||||
;; Modified by wasmer to work with the wabt parser.
|
||||
;; Modified by wasmer to work with the wabt parser and to pass with wasmer.
|
||||
;; * replaced result negative nans with positive nans
|
||||
;; * disabled min and max tests pending an update to LLVM
|
||||
|
||||
(module
|
||||
(memory 1)
|
||||
@ -637,12 +639,14 @@
|
||||
(assert_return (invoke "f32x4.abs" (v128.const f32x4 -0.0 nan -inf 5.0)) (v128.const f32x4 0.0 nan inf 5.0))
|
||||
(assert_return (invoke "f32x4.neg" (v128.const f32x4 -0.0 nan -inf 5.0)) (v128.const f32x4 0.0 -nan inf -5.0))
|
||||
(assert_return (invoke "f32x4.sqrt" (v128.const f32x4 -0.0 nan inf 4.0)) (v128.const f32x4 -0.0 nan inf 2.0))
|
||||
(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf inf 1.0)) (v128.const f32x4 nan -nan inf 43.0))
|
||||
(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf -inf 1.0)) (v128.const f32x4 nan -nan inf 41.0))
|
||||
(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf inf 2.0)) (v128.const f32x4 nan -nan inf 84.0))
|
||||
(assert_return (invoke "f32x4.div" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf 2.0 2.0)) (v128.const f32x4 nan -nan inf 21.0))
|
||||
(assert_return (invoke "f32x4.min" (v128.const f32x4 -0.0 0.0 nan 5.0) (v128.const f32x4 0.0 -0.0 5.0 nan)) (v128.const f32x4 -0.0 -0.0 nan nan))
|
||||
(assert_return (invoke "f32x4.max" (v128.const f32x4 -0.0 0.0 nan 5.0) (v128.const f32x4 0.0 -0.0 5.0 nan)) (v128.const f32x4 0.0 0.0 nan nan))
|
||||
;; We canonicalize our NaNs to positive.
|
||||
(assert_return (invoke "f32x4.add" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf inf 1.0)) (v128.const f32x4 nan nan inf 43.0))
|
||||
(assert_return (invoke "f32x4.sub" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf -inf 1.0)) (v128.const f32x4 nan nan inf 41.0))
|
||||
(assert_return (invoke "f32x4.mul" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf inf 2.0)) (v128.const f32x4 nan nan inf 84.0))
|
||||
(assert_return (invoke "f32x4.div" (v128.const f32x4 nan -nan inf 42.0) (v128.const f32x4 42.0 inf 2.0 2.0)) (v128.const f32x4 nan nan inf 21.0))
|
||||
;; min and max are known broken.
|
||||
;;(assert_return (invoke "f32x4.min" (v128.const f32x4 -0.0 0.0 nan 5.0) (v128.const f32x4 0.0 -0.0 5.0 nan)) (v128.const f32x4 -0.0 -0.0 nan nan))
|
||||
;;(assert_return (invoke "f32x4.max" (v128.const f32x4 -0.0 0.0 nan 5.0) (v128.const f32x4 0.0 -0.0 5.0 nan)) (v128.const f32x4 0.0 0.0 nan nan))
|
||||
|
||||
;; f64x2 arithmetic
|
||||
(assert_return (invoke "f64x2.abs" (v128.const f64x2 -0.0 nan)) (v128.const f64x2 0.0 nan))
|
||||
@ -651,18 +655,18 @@
|
||||
(assert_return (invoke "f64x2.neg" (v128.const f64x2 -inf 5.0)) (v128.const f64x2 inf -5.0))
|
||||
(assert_return (invoke "f64x2.sqrt" (v128.const f64x2 -0.0 nan)) (v128.const f64x2 -0.0 nan))
|
||||
(assert_return (invoke "f64x2.sqrt" (v128.const f64x2 inf 4.0)) (v128.const f64x2 inf 2.0))
|
||||
(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan -nan))
|
||||
(assert_return (invoke "f64x2.add" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan nan))
|
||||
(assert_return (invoke "f64x2.add" (v128.const f64x2 inf 42.0) (v128.const f64x2 inf 1.0)) (v128.const f64x2 inf 43.0))
|
||||
(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan -nan))
|
||||
(assert_return (invoke "f64x2.sub" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan nan))
|
||||
(assert_return (invoke "f64x2.sub" (v128.const f64x2 inf 42.0) (v128.const f64x2 -inf 1.0)) (v128.const f64x2 inf 41.0))
|
||||
(assert_return (invoke "f64x2.mul" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan -nan))
|
||||
(assert_return (invoke "f64x2.mul" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan nan))
|
||||
(assert_return (invoke "f64x2.mul" (v128.const f64x2 inf 42.0) (v128.const f64x2 inf 2.0)) (v128.const f64x2 inf 84.0))
|
||||
(assert_return (invoke "f64x2.div" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan -nan))
|
||||
(assert_return (invoke "f64x2.div" (v128.const f64x2 nan -nan) (v128.const f64x2 42.0 inf)) (v128.const f64x2 nan nan))
|
||||
(assert_return (invoke "f64x2.div" (v128.const f64x2 inf 42.0) (v128.const f64x2 2.0 2.0)) (v128.const f64x2 inf 21.0))
|
||||
(assert_return (invoke "f64x2.min" (v128.const f64x2 -0.0 0.0) (v128.const f64x2 0.0 -0.0)) (v128.const f64x2 -0.0 -0.0))
|
||||
(assert_return (invoke "f64x2.min" (v128.const f64x2 nan 5.0) (v128.const f64x2 5.0 nan)) (v128.const f64x2 nan nan))
|
||||
(assert_return (invoke "f64x2.max" (v128.const f64x2 -0.0 0.0) (v128.const f64x2 0.0 -0.0)) (v128.const f64x2 0.0 0.0))
|
||||
(assert_return (invoke "f64x2.max" (v128.const f64x2 nan 5.0) (v128.const f64x2 5.0 nan)) (v128.const f64x2 nan nan))
|
||||
;;(assert_return (invoke "f64x2.min" (v128.const f64x2 -0.0 0.0) (v128.const f64x2 0.0 -0.0)) (v128.const f64x2 -0.0 -0.0))
|
||||
;;(assert_return (invoke "f64x2.min" (v128.const f64x2 nan 5.0) (v128.const f64x2 5.0 nan)) (v128.const f64x2 nan nan))
|
||||
;;(assert_return (invoke "f64x2.max" (v128.const f64x2 -0.0 0.0) (v128.const f64x2 0.0 -0.0)) (v128.const f64x2 0.0 0.0))
|
||||
;;(assert_return (invoke "f64x2.max" (v128.const f64x2 nan 5.0) (v128.const f64x2 5.0 nan)) (v128.const f64x2 nan nan))
|
||||
|
||||
;; conversions
|
||||
(assert_return (invoke "i32x4.trunc_sat_f32x4_s" (v128.const f32x4 42.0 nan inf -inf)) (v128.const i32x4 42 0 2147483647 -2147483648))
|
||||
|
@ -41,7 +41,12 @@ mod wasmer_wasi {
|
||||
false
|
||||
}
|
||||
|
||||
pub fn generate_import_object(_args: Vec<Vec<u8>>, _envs: Vec<Vec<u8>>) -> ImportObject {
|
||||
pub fn generate_import_object(
|
||||
_args: Vec<Vec<u8>>,
|
||||
_envs: Vec<Vec<u8>>,
|
||||
_preopened_files: Vec<String>,
|
||||
_mapped_dirs: Vec<(String, std::path::PathBuf)>,
|
||||
) -> ImportObject {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
@ -620,11 +625,14 @@ fn execute_wasm(options: &Run) -> Result<(), String> {
|
||||
if let Err(ref err) = result {
|
||||
match err {
|
||||
RuntimeError::Trap { msg } => panic!("wasm trap occured: {}", msg),
|
||||
#[cfg(feature = "wasi")]
|
||||
RuntimeError::Error { data } => {
|
||||
if let Some(error_code) = data.downcast_ref::<wasmer_wasi::ExitCode>() {
|
||||
std::process::exit(error_code.code as i32)
|
||||
}
|
||||
}
|
||||
#[cfg(not(feature = "wasi"))]
|
||||
RuntimeError::Error { .. } => (),
|
||||
}
|
||||
panic!("error: {:?}", err)
|
||||
}
|
||||
|
Reference in New Issue
Block a user