Mirror of https://github.com/fluencelabs/wasmer, synced 2025-06-26 07:01:33 +00:00.
Merge #1095

1095: Update to cranelift 0.52 r=nlewycky a=nlewycky

# Description

Update to cranelift 0.52. To use our wasmer branch of cranelift, we point to its path on git. Per @syrusakbary, we don't want to push updated wasmer-clif crates until we're ready to release the matching wasmer that will use them.

# Review

- [x] Add a short description of the change to the CHANGELOG.md file

Co-authored-by: Nick Lewycky <nick@wasmer.io>
Co-authored-by: Mark McCaskey <mark@wasmer.io>
CHANGELOG.md

@@ -2,6 +2,7 @@
 
 ## **[Unreleased]**
 
+- [#1095](https://github.com/wasmerio/wasmer/pull/1095) Update to cranelift 0.52.
 - [#1092](https://github.com/wasmerio/wasmer/pull/1092) Add `get_utf8_string_with_nul` to `WasmPtr` to read nul-terminated strings from memory.
 - [#1071](https://github.com/wasmerio/wasmer/pull/1071) Add support for non-trapping float-to-int conversions, enabled by default.
 
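The #1071 entry above refers to the WebAssembly saturating ("non-trapping") float-to-int conversions. As a rough, self-contained illustration of the intended semantics rather than wasmer's implementation, a scalar `i32.trunc_sat_f32_s` behaves like the sketch below; the function name and structure are mine:

```rust
/// Hedged sketch of saturating f32 -> i32 truncation semantics:
/// NaN maps to 0, and out-of-range values clamp to the i32 bounds instead of trapping.
fn i32_trunc_sat_f32_s(x: f32) -> i32 {
    if x.is_nan() {
        0
    } else if x <= i32::MIN as f32 {
        i32::MIN
    } else if x >= i32::MAX as f32 {
        i32::MAX
    } else {
        x as i32 // in-range values truncate toward zero
    }
}
```

The trapping variants (`i32.trunc_f32_s` and friends) keep their old behaviour; only the `*_sat_*` operators get this clamping treatment, which is why the LLVM hunks further down still guard the non-saturating forms with `trap_if_not_representable_as_int`.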
Cargo.lock (generated): 1497 changed lines. File diff suppressed because it is too large.
@@ -12,13 +12,13 @@ readme = "README.md"
 
 [dependencies]
 wasmer-runtime-core = { path = "../runtime-core", version = "0.12.0" }
-cranelift-native = "0.44.0"
+cranelift-native = { git = "https://github.com/wasmerio/cranelift", branch = "wasmer" }
-cranelift-codegen = "0.44.0"
+cranelift-codegen = { git = "https://github.com/wasmerio/cranelift", branch = "wasmer" }
-cranelift-entity = "0.44.0"
+cranelift-entity = { git = "https://github.com/wasmerio/cranelift", branch = "wasmer" }
-cranelift-frontend = { package = "wasmer-clif-fork-frontend", version = "0.44.0" }
+cranelift-frontend = { git = "https://github.com/wasmerio/cranelift", branch = "wasmer" }
-cranelift-wasm = { package = "wasmer-clif-fork-wasm", version = "0.44.0" }
+cranelift-wasm = { git = "https://github.com/wasmerio/cranelift", branch = "wasmer" }
-target-lexicon = "0.8.1"
+target-lexicon = "0.9"
-wasmparser = "0.39.1"
+wasmparser = "0.45.0"
 byteorder = "1.3.2"
 nix = "0.15.0"
 libc = "0.2.60"
@@ -11,9 +11,9 @@ use cranelift_codegen::ir::{self, Ebb, Function, InstBuilder};
 use cranelift_codegen::isa::CallConv;
 use cranelift_codegen::{cursor::FuncCursor, isa};
 use cranelift_frontend::{FunctionBuilder, Position, Variable};
-use cranelift_wasm::{self, FuncTranslator};
+use cranelift_wasm::{self, FuncTranslator, ModuleTranslationState};
 use cranelift_wasm::{get_vmctx_value_label, translate_operator};
-use cranelift_wasm::{FuncEnvironment, ReturnMode, WasmError};
+use cranelift_wasm::{FuncEnvironment, ReturnMode, TargetEnvironment, WasmError};
 use std::mem;
 use std::sync::{Arc, RwLock};
 use wasmer_runtime_core::error::CompileError;
@@ -247,7 +247,7 @@ pub struct FunctionEnvironment {
     clif_signatures: Map<SigIndex, ir::Signature>,
 }
 
-impl FuncEnvironment for FunctionEnvironment {
+impl TargetEnvironment for FunctionEnvironment {
     /// Gets configuration information needed for compiling functions
     fn target_config(&self) -> isa::TargetFrontendConfig {
         self.target_config
@@ -265,6 +265,13 @@ impl FuncEnvironment for FunctionEnvironment {
         self.target_config().pointer_bytes()
     }
 
+    /// Return Cranelift reference type.
+    fn reference_type(&self) -> ir::Type {
+        ir::types::R64
+    }
+}
+
+impl FuncEnvironment for FunctionEnvironment {
     /// Sets up the necessary preamble definitions in `func` to access the global identified
     /// by `index`.
     ///
@@ -802,7 +809,6 @@ impl FuncEnvironment for FunctionEnvironment {
             }
         }
     }
 
     /// Generates code corresponding to wasm `memory.grow`.
     ///
     /// `index` refers to the linear memory to query.
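Cranelift 0.52 splits what used to be a single `FuncEnvironment` surface: target-wide queries (`target_config`, pointer sizes, and the new `reference_type`) move to a `TargetEnvironment` trait, and the per-function hooks build on top of it, which is why the hunks above turn one `impl` block into two. A minimal self-contained sketch of that shape, using local stand-in traits instead of the real cranelift-wasm definitions:

```rust
// Stand-ins: the real TargetFrontendConfig / TargetEnvironment / FuncEnvironment
// live in cranelift-codegen and cranelift-wasm 0.52.
#[derive(Clone, Copy)]
struct TargetFrontendConfig {
    pointer_bits: u8,
}

// Target-wide queries now live on their own trait...
trait TargetEnvironment {
    fn target_config(&self) -> TargetFrontendConfig;

    fn pointer_bytes(&self) -> u8 {
        self.target_config().pointer_bits / 8
    }
}

// ...and the per-function environment requires it as a supertrait, so an
// implementor ends up with two impl blocks, as in the diff above.
trait FuncEnvironment: TargetEnvironment {
    fn translate_memory_grow(&mut self, memory_index: u32);
}

struct Env {
    config: TargetFrontendConfig,
}

impl TargetEnvironment for Env {
    fn target_config(&self) -> TargetFrontendConfig {
        self.config
    }
}

impl FuncEnvironment for Env {
    fn translate_memory_grow(&mut self, _memory_index: u32) {
        // a real backend would emit the memory.grow lowering here
    }
}
```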
@@ -931,6 +937,94 @@ impl FuncEnvironment for FunctionEnvironment {
 
         Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
     }
+    fn translate_memory_copy(
+        &mut self,
+        _pos: FuncCursor,
+        _clif_mem_index: cranelift_wasm::MemoryIndex,
+        _heap: ir::Heap,
+        _dst: ir::Value,
+        _src: ir::Value,
+        _len: ir::Value,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("memory.copy not yet implemented");
+    }
+
+    fn translate_memory_fill(
+        &mut self,
+        _pos: FuncCursor,
+        _clif_mem_index: cranelift_wasm::MemoryIndex,
+        _heap: ir::Heap,
+        _dst: ir::Value,
+        _val: ir::Value,
+        _len: ir::Value,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("memory.fill not yet implemented");
+    }
+
+    fn translate_memory_init(
+        &mut self,
+        _pos: FuncCursor,
+        _clif_mem_index: cranelift_wasm::MemoryIndex,
+        _heap: ir::Heap,
+        _seg_index: u32,
+        _dst: ir::Value,
+        _src: ir::Value,
+        _len: ir::Value,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("memory.init not yet implemented");
+    }
+
+    fn translate_data_drop(
+        &mut self,
+        _pos: FuncCursor,
+        _seg_index: u32,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("data.drop not yet implemented");
+    }
+
+    fn translate_table_size(
+        &mut self,
+        _pos: FuncCursor,
+        _index: cranelift_wasm::TableIndex,
+        _table: ir::Table,
+    ) -> cranelift_wasm::WasmResult<ir::Value> {
+        unimplemented!("table.size not yet implemented");
+    }
+
+    fn translate_table_copy(
+        &mut self,
+        _pos: FuncCursor,
+        _dst_table_index: cranelift_wasm::TableIndex,
+        _dst_table: ir::Table,
+        _src_table_index: cranelift_wasm::TableIndex,
+        _src_table: ir::Table,
+        _dst: ir::Value,
+        _src: ir::Value,
+        _len: ir::Value,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("table.copy yet implemented");
+    }
+
+    fn translate_table_init(
+        &mut self,
+        _pos: FuncCursor,
+        _seg_index: u32,
+        _table_index: cranelift_wasm::TableIndex,
+        _table: ir::Table,
+        _dst: ir::Value,
+        _src: ir::Value,
+        _len: ir::Value,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("table.init yet implemented");
+    }
+
+    fn translate_elem_drop(
+        &mut self,
+        _pos: FuncCursor,
+        _seg_index: u32,
+    ) -> cranelift_wasm::WasmResult<()> {
+        unimplemented!("elem.drop yet implemented");
+    }
 }
 
 impl FunctionEnvironment {
@@ -1016,8 +1110,15 @@ impl FunctionCodeGenerator<CodegenError> for CraneliftFunctionCodeGenerator {
             &mut self.func_translator.func_ctx,
             &mut self.position,
         );
-        let state = &mut self.func_translator.state;
-        translate_operator(op, &mut builder, state, &mut self.func_env)?;
+        let module_state = ModuleTranslationState::new();
+        let func_state = &mut self.func_translator.state;
+        translate_operator(
+            &module_state,
+            op,
+            &mut builder,
+            func_state,
+            &mut self.func_env,
+        )?;
         Ok(())
     }
 
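The hunk above tracks a signature change in cranelift-wasm 0.52: `translate_operator` now takes a module-level `ModuleTranslationState` in addition to the per-function translation state. A simplified, self-contained sketch of that calling shape, with every type below a local stand-in rather than the real cranelift/wasmparser one (the real call also threads a `FunctionBuilder` and the `FuncEnvironment`, omitted here):

```rust
// Stand-ins for cranelift_wasm::ModuleTranslationState, the per-function
// translation state, and wasmparser::Operator.
struct ModuleTranslationState;
struct FuncTranslationState {
    ops_translated: usize,
}
struct Operator;

fn translate_operator(
    _module_state: &ModuleTranslationState,
    _op: &Operator,
    func_state: &mut FuncTranslationState,
) -> Result<(), String> {
    // A real backend would emit IR here; the stand-in just counts operators.
    func_state.ops_translated += 1;
    Ok(())
}

fn main() -> Result<(), String> {
    // Module-level state is shared across function bodies; the wasmer hunk above
    // simply constructs a fresh one with ModuleTranslationState::new().
    let module_state = ModuleTranslationState;
    let mut func_state = FuncTranslationState { ops_translated: 0 };
    for op in [Operator, Operator, Operator] {
        translate_operator(&module_state, &op, &mut func_state)?;
    }
    assert_eq!(func_state.ops_translated, 3);
    Ok(())
}
```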
@@ -11,7 +11,7 @@ readme = "README.md"
 
 [dependencies]
 wasmer-runtime-core = { path = "../runtime-core", version = "0.12.0" }
-wasmparser = "0.39.1"
+wasmparser = "0.45.0"
 smallvec = "0.6"
 goblin = "0.0.24"
 libc = "0.2.60"
@@ -1745,7 +1745,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
             }
 
             // Operate on locals.
-            Operator::GetLocal { local_index } => {
+            Operator::LocalGet { local_index } => {
                 let pointer_value = locals[local_index as usize];
                 let v = builder.build_load(pointer_value, &state.var_name());
                 tbaa_label(
@@ -1757,14 +1757,14 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(v);
             }
-            Operator::SetLocal { local_index } => {
+            Operator::LocalSet { local_index } => {
                 let pointer_value = locals[local_index as usize];
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let store = builder.build_store(pointer_value, v);
                 tbaa_label(&self.module, intrinsics, "local", store, Some(local_index));
             }
-            Operator::TeeLocal { local_index } => {
+            Operator::LocalTee { local_index } => {
                 let pointer_value = locals[local_index as usize];
                 let (v, i) = state.peek1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
@@ -1772,7 +1772,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 tbaa_label(&self.module, intrinsics, "local", store, Some(local_index));
             }
 
-            Operator::GetGlobal { global_index } => {
+            Operator::GlobalGet { global_index } => {
                 let index = GlobalIndex::new(global_index as usize);
                 let global_cache = ctx.global_cache(index, intrinsics, self.module.clone());
                 match global_cache {
@@ -1792,7 +1792,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     }
                 }
             }
-            Operator::SetGlobal { global_index } => {
+            Operator::GlobalSet { global_index } => {
                 let (value, info) = state.pop1_extra()?;
                 let value = apply_pending_canonicalization(builder, intrinsics, value, info);
                 let index = GlobalIndex::new(global_index as usize);
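These renames, here and in the hunks that follow, track wasmparser 0.45, whose `Operator` variants now mirror the WebAssembly text-format mnemonics (`local.get`, `global.set`, `i32.trunc_sat_f32_s`, atomic `*_u` suffixes, and so on). A tiny self-contained illustration of the new naming style, with `Operator` as a local stand-in enum rather than the real wasmparser type:

```rust
// Local stand-in enum; the real wasmparser 0.45 Operator has many more variants.
enum Operator {
    LocalGet { local_index: u32 },
    GlobalSet { global_index: u32 },
    I32TruncSatF32S,
    I64AtomicRmw8AddU,
}

// The variant names now read like the wat mnemonics, which is what the renames
// in the surrounding hunks follow.
fn mnemonic(op: &Operator) -> String {
    match op {
        Operator::LocalGet { local_index } => format!("local.get {}", local_index),
        Operator::GlobalSet { global_index } => format!("global.set {}", global_index),
        Operator::I32TruncSatF32S => "i32.trunc_sat_f32_s".to_string(),
        Operator::I64AtomicRmw8AddU => "i64.atomic.rmw8.add_u".to_string(),
    }
}
```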
@@ -4444,21 +4444,21 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_int_truncate(v, intrinsics.i32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I64ExtendSI32 => {
+            Operator::I64ExtendI32S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
                 let res = builder.build_int_s_extend(v, intrinsics.i64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I64ExtendUI32 => {
+            Operator::I64ExtendI32U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
                 let res = builder.build_int_z_extend(v, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(res, ExtraInfo::arithmetic_f64());
             }
-            Operator::I32x4TruncSF32x4Sat => {
+            Operator::I32x4TruncSatF32x4S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4476,7 +4476,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I32x4TruncUF32x4Sat => {
+            Operator::I32x4TruncSatF32x4U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4494,7 +4494,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I64x2TruncSF64x2Sat => {
+            Operator::I64x2TruncSatF64x2S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4512,7 +4512,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I64x2TruncUF64x2Sat => {
+            Operator::I64x2TruncSatF64x2U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4530,7 +4530,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I32TruncSF32 => {
+            Operator::I32TruncF32S => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder, intrinsics, context, &function, 0xcf000000, // -2147483600.0
@@ -4541,7 +4541,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_signed_int(v1, intrinsics.i32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I32TruncSF64 => {
+            Operator::I32TruncF64S => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder,
@@ -4556,7 +4556,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_signed_int(v1, intrinsics.i32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I32TruncSSatF32 => {
+            Operator::I32TruncSatF32S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4572,7 +4572,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I32TruncSSatF64 => {
+            Operator::I32TruncSatF64S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4588,7 +4588,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I64TruncSF32 => {
+            Operator::I64TruncF32S => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder, intrinsics, context, &function,
@@ -4600,7 +4600,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_signed_int(v1, intrinsics.i64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I64TruncSF64 => {
+            Operator::I64TruncF64S => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder,
@@ -4615,7 +4615,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_signed_int(v1, intrinsics.i64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I64TruncSSatF32 => {
+            Operator::I64TruncSatF32S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4631,7 +4631,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I64TruncSSatF64 => {
+            Operator::I64TruncSatF64S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4647,7 +4647,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I32TruncUF32 => {
+            Operator::I32TruncF32U => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder, intrinsics, context, &function, 0xbf7fffff, // -0.99999994
@@ -4658,7 +4658,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_unsigned_int(v1, intrinsics.i32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I32TruncUF64 => {
+            Operator::I32TruncF64U => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder,
@@ -4673,7 +4673,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_unsigned_int(v1, intrinsics.i32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I32TruncUSatF32 => {
+            Operator::I32TruncSatF32U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4689,7 +4689,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I32TruncUSatF64 => {
+            Operator::I32TruncSatF64U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4705,7 +4705,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I64TruncUF32 => {
+            Operator::I64TruncF32U => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder, intrinsics, context, &function, 0xbf7fffff, // -0.99999994
@@ -4716,7 +4716,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_unsigned_int(v1, intrinsics.i64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I64TruncUF64 => {
+            Operator::I64TruncF64U => {
                 let v1 = state.pop1()?.into_float_value();
                 trap_if_not_representable_as_int(
                     builder,
@@ -4731,7 +4731,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_float_to_unsigned_int(v1, intrinsics.i64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::I64TruncUSatF32 => {
+            Operator::I64TruncSatF32U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4747,7 +4747,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(res);
             }
-            Operator::I64TruncUSatF64 => {
+            Operator::I64TruncSatF64U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_float_value();
@@ -4775,7 +4775,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_float_ext(v, intrinsics.f64_ty, &state.var_name());
                 state.push1_extra(res, ExtraInfo::pending_f64_nan());
             }
-            Operator::F32ConvertSI32 | Operator::F32ConvertSI64 => {
+            Operator::F32ConvertI32S | Operator::F32ConvertI64S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4783,7 +4783,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_signed_int_to_float(v, intrinsics.f32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::F64ConvertSI32 | Operator::F64ConvertSI64 => {
+            Operator::F64ConvertI32S | Operator::F64ConvertI64S => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4791,7 +4791,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_signed_int_to_float(v, intrinsics.f64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::F32ConvertUI32 | Operator::F32ConvertUI64 => {
+            Operator::F32ConvertI32U | Operator::F32ConvertI64U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4799,7 +4799,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_unsigned_int_to_float(v, intrinsics.f32_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::F64ConvertUI32 | Operator::F64ConvertUI64 => {
+            Operator::F64ConvertI32U | Operator::F64ConvertI64U => {
                 let (v, i) = state.pop1_extra()?;
                 let v = apply_pending_canonicalization(builder, intrinsics, v, i);
                 let v = v.into_int_value();
@@ -4807,7 +4807,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     builder.build_unsigned_int_to_float(v, intrinsics.f64_ty, &state.var_name());
                 state.push1(res);
             }
-            Operator::F32x4ConvertSI32x4 => {
+            Operator::F32x4ConvertI32x4S => {
                 let v = state.pop1()?;
                 let v = builder
                     .build_bitcast(v, intrinsics.i32x4_ty, "")
@@ -4817,7 +4817,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::F32x4ConvertUI32x4 => {
+            Operator::F32x4ConvertI32x4U => {
                 let v = state.pop1()?;
                 let v = builder
                     .build_bitcast(v, intrinsics.i32x4_ty, "")
@@ -4827,7 +4827,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::F64x2ConvertSI64x2 => {
+            Operator::F64x2ConvertI64x2S => {
                 let v = state.pop1()?;
                 let v = builder
                     .build_bitcast(v, intrinsics.i64x2_ty, "")
@@ -4837,7 +4837,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::F64x2ConvertUI64x2 => {
+            Operator::F64x2ConvertI64x2U => {
                 let v = state.pop1()?;
                 let v = builder
                     .build_bitcast(v, intrinsics.i64x2_ty, "")
@@ -5891,7 +5891,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::I8x16LoadSplat { ref memarg } => {
+            Operator::V8x16LoadSplat { ref memarg } => {
                 let effective_address = resolve_memory_ptr(
                     builder,
                     intrinsics,
@@ -5926,7 +5926,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::I16x8LoadSplat { ref memarg } => {
+            Operator::V16x8LoadSplat { ref memarg } => {
                 let effective_address = resolve_memory_ptr(
                     builder,
                     intrinsics,
@@ -5961,7 +5961,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::I32x4LoadSplat { ref memarg } => {
+            Operator::V32x4LoadSplat { ref memarg } => {
                 let effective_address = resolve_memory_ptr(
                     builder,
                     intrinsics,
@@ -5996,7 +5996,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::I64x2LoadSplat { ref memarg } => {
+            Operator::V64x2LoadSplat { ref memarg } => {
                 let effective_address = resolve_memory_ptr(
                     builder,
                     intrinsics,
@@ -6031,7 +6031,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let res = builder.build_bitcast(res, intrinsics.i128_ty, "");
                 state.push1(res);
             }
-            Operator::Fence { flags: _ } => {
+            Operator::AtomicFence { flags: _ } => {
                 // Fence is a nop.
                 //
                 // Fence was added to preserve information about fences from
@@ -6415,7 +6415,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                     .unwrap();
                 tbaa_label(&self.module, intrinsics, "memory", store, Some(0));
             }
-            Operator::I32AtomicRmw8UAdd { ref memarg } => {
+            Operator::I32AtomicRmw8AddU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6457,7 +6457,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16UAdd { ref memarg } => {
+            Operator::I32AtomicRmw16AddU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6538,7 +6538,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I64AtomicRmw8UAdd { ref memarg } => {
+            Operator::I64AtomicRmw8AddU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6580,7 +6580,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw16UAdd { ref memarg } => {
+            Operator::I64AtomicRmw16AddU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6622,7 +6622,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32UAdd { ref memarg } => {
+            Operator::I64AtomicRmw32AddU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6703,7 +6703,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I32AtomicRmw8USub { ref memarg } => {
+            Operator::I32AtomicRmw8SubU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6745,7 +6745,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16USub { ref memarg } => {
+            Operator::I32AtomicRmw16SubU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6826,7 +6826,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I64AtomicRmw8USub { ref memarg } => {
+            Operator::I64AtomicRmw8SubU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6868,7 +6868,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I64AtomicRmw16USub { ref memarg } => {
+            Operator::I64AtomicRmw16SubU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6910,7 +6910,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32USub { ref memarg } => {
+            Operator::I64AtomicRmw32SubU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -6991,7 +6991,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I32AtomicRmw8UAnd { ref memarg } => {
+            Operator::I32AtomicRmw8AndU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7033,7 +7033,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16UAnd { ref memarg } => {
+            Operator::I32AtomicRmw16AndU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7114,7 +7114,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I64AtomicRmw8UAnd { ref memarg } => {
+            Operator::I64AtomicRmw8AndU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7156,7 +7156,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw16UAnd { ref memarg } => {
+            Operator::I64AtomicRmw16AndU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7198,7 +7198,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32UAnd { ref memarg } => {
+            Operator::I64AtomicRmw32AndU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7279,7 +7279,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I32AtomicRmw8UOr { ref memarg } => {
+            Operator::I32AtomicRmw8OrU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7321,7 +7321,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16UOr { ref memarg } => {
+            Operator::I32AtomicRmw16OrU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7403,7 +7403,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I64AtomicRmw8UOr { ref memarg } => {
+            Operator::I64AtomicRmw8OrU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7445,7 +7445,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw16UOr { ref memarg } => {
+            Operator::I64AtomicRmw16OrU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7487,7 +7487,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32UOr { ref memarg } => {
+            Operator::I64AtomicRmw32OrU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7568,7 +7568,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I32AtomicRmw8UXor { ref memarg } => {
+            Operator::I32AtomicRmw8XorU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7610,7 +7610,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16UXor { ref memarg } => {
+            Operator::I32AtomicRmw16XorU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7691,7 +7691,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I64AtomicRmw8UXor { ref memarg } => {
+            Operator::I64AtomicRmw8XorU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7733,7 +7733,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw16UXor { ref memarg } => {
+            Operator::I64AtomicRmw16XorU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7775,7 +7775,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32UXor { ref memarg } => {
+            Operator::I64AtomicRmw32XorU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7856,7 +7856,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I32AtomicRmw8UXchg { ref memarg } => {
+            Operator::I32AtomicRmw8XchgU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7898,7 +7898,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16UXchg { ref memarg } => {
+            Operator::I32AtomicRmw16XchgU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -7979,7 +7979,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I64AtomicRmw8UXchg { ref memarg } => {
+            Operator::I64AtomicRmw8XchgU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -8021,7 +8021,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw16UXchg { ref memarg } => {
+            Operator::I64AtomicRmw16XchgU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -8063,7 +8063,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32UXchg { ref memarg } => {
+            Operator::I64AtomicRmw32XchgU { ref memarg } => {
                 let value = state.pop1()?.into_int_value();
                 let effective_address = resolve_memory_ptr(
                     builder,
@@ -8144,7 +8144,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 );
                 state.push1(old);
             }
-            Operator::I32AtomicRmw8UCmpxchg { ref memarg } => {
+            Operator::I32AtomicRmw8CmpxchgU { ref memarg } => {
                 let ((cmp, cmp_info), (new, new_info)) = state.pop2_extra()?;
                 let cmp = apply_pending_canonicalization(builder, intrinsics, cmp, cmp_info);
                 let new = apply_pending_canonicalization(builder, intrinsics, new, new_info);
@@ -8196,7 +8196,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i32_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f32());
             }
-            Operator::I32AtomicRmw16UCmpxchg { ref memarg } => {
+            Operator::I32AtomicRmw16CmpxchgU { ref memarg } => {
                 let ((cmp, cmp_info), (new, new_info)) = state.pop2_extra()?;
                 let cmp = apply_pending_canonicalization(builder, intrinsics, cmp, cmp_info);
                 let new = apply_pending_canonicalization(builder, intrinsics, new, new_info);
@@ -8292,7 +8292,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_extract_value(old, 0, "").unwrap();
                 state.push1(old);
             }
-            Operator::I64AtomicRmw8UCmpxchg { ref memarg } => {
+            Operator::I64AtomicRmw8CmpxchgU { ref memarg } => {
                 let ((cmp, cmp_info), (new, new_info)) = state.pop2_extra()?;
                 let cmp = apply_pending_canonicalization(builder, intrinsics, cmp, cmp_info);
                 let new = apply_pending_canonicalization(builder, intrinsics, new, new_info);
@@ -8344,7 +8344,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw16UCmpxchg { ref memarg } => {
+            Operator::I64AtomicRmw16CmpxchgU { ref memarg } => {
                 let ((cmp, cmp_info), (new, new_info)) = state.pop2_extra()?;
                 let cmp = apply_pending_canonicalization(builder, intrinsics, cmp, cmp_info);
                 let new = apply_pending_canonicalization(builder, intrinsics, new, new_info);
@@ -8396,7 +8396,7 @@ impl<'ctx> FunctionCodeGenerator<CodegenError> for LLVMFunctionCodeGenerator<'ctx> {
                 let old = builder.build_int_z_extend(old, intrinsics.i64_ty, &state.var_name());
                 state.push1_extra(old, ExtraInfo::arithmetic_f64());
             }
-            Operator::I64AtomicRmw32UCmpxchg { ref memarg } => {
+            Operator::I64AtomicRmw32CmpxchgU { ref memarg } => {
                 let ((cmp, cmp_info), (new, new_info)) = state.pop2_extra()?;
                 let cmp = apply_pending_canonicalization(builder, intrinsics, cmp, cmp_info);
                 let new = apply_pending_canonicalization(builder, intrinsics, new, new_info);
@@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
nix = "0.15"
page_size = "0.4"
-wasmparser = "0.39.1"
+wasmparser = "0.45.0"
parking_lot = "0.9"
lazy_static = "1.4"
errno = "0.2"
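The wasmparser bump above (0.39.1 to 0.45.0) is what drives the operator renames in the hunks that follow: newer wasmparser releases move the signedness/width qualifier to the end of the variant name (`GetGlobal` becomes `GlobalGet`, `I32TruncUF32` becomes `I32TruncF32U`, `I64AtomicRmw16UXchg` becomes `I64AtomicRmw16XchgU`, and so on). Below is a minimal sketch of matching on a few of the renamed variants; it is illustrative only and assumes a wasmparser release that already uses the new names.

```rust
use wasmparser::Operator;

/// Map a few renamed variants back to their WAT mnemonics.
/// Illustrative sketch only; assumes a wasmparser version with the new naming.
fn mnemonic(op: &Operator) -> Option<&'static str> {
    Some(match op {
        Operator::GlobalGet { .. } => "global.get",                        // was GetGlobal
        Operator::LocalTee { .. } => "local.tee",                          // was TeeLocal
        Operator::I32TruncF32U => "i32.trunc_f32_u",                       // was I32TruncUF32
        Operator::I64AtomicRmw16XchgU { .. } => "i64.atomic.rmw16.xchg_u", // was I64AtomicRmw16UXchg
        _ => return None,
    })
}
```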
@@ -451,7 +451,7 @@ fn func_type_to_func_sig(func_ty: &FuncType) -> Result<FuncSig, BinaryReaderErro

fn eval_init_expr(op: &Operator) -> Result<Initializer, BinaryReaderError> {
Ok(match *op {
-Operator::GetGlobal { global_index } => {
+Operator::GlobalGet { global_index } => {
Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize))
}
Operator::I32Const { value } => Initializer::Const(Value::I32(value)),
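The hunk above is the module-parsing side of the rename: a global initializer or segment offset is a one-operator constant expression, and `eval_init_expr` maps that operator onto an `Initializer`, where a `global.get` may only refer to an imported global. The sketch below restates the same idea in a self-contained form, using hypothetical local types rather than wasmer's own; only the two cases visible in this hunk are shown.

```rust
use wasmparser::Operator;

// Hypothetical stand-ins for wasmer's Initializer / index types, for illustration only.
#[derive(Debug)]
enum Init {
    Const(i32),          // e.g. `(i32.const 42)`
    ImportedGlobal(u32), // e.g. `(global.get 0)`, referring to an imported global
}

fn eval_const_expr(op: &Operator) -> Result<Init, &'static str> {
    Ok(match op {
        Operator::GlobalGet { global_index } => Init::ImportedGlobal(*global_index),
        Operator::I32Const { value } => Init::Const(*value),
        _ => return Err("unsupported constant expression"),
    })
}
```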
@@ -2553,7 +2553,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
};

match *op {
-Operator::GetGlobal { global_index } => {
+Operator::GlobalGet { global_index } => {
let global_index = global_index as usize;

let tmp = self.machine.acquire_temp_gpr().unwrap();
@@ -2619,7 +2619,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {

self.machine.release_temp_gpr(tmp);
}
-Operator::SetGlobal { global_index } => {
+Operator::GlobalSet { global_index } => {
let mut global_index = global_index as usize;
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
@@ -2667,7 +2667,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {

self.machine.release_temp_gpr(tmp);
}
-Operator::GetLocal { local_index } => {
+Operator::LocalGet { local_index } => {
let local_index = local_index as usize;
let ret = self.machine.acquire_locations(
a,
@@ -2684,7 +2684,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
);
self.value_stack.push(ret);
}
-Operator::SetLocal { local_index } => {
+Operator::LocalSet { local_index } => {
let local_index = local_index as usize;
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
@@ -2698,7 +2698,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.locals[local_index],
);
}
-Operator::TeeLocal { local_index } => {
+Operator::LocalTee { local_index } => {
let local_index = local_index as usize;
let loc = *self.value_stack.last().unwrap();

@@ -3515,7 +3515,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
&mut self.value_stack,
Condition::GreaterEqual,
)?,
-Operator::I64ExtendUI32 => {
+Operator::I64ExtendI32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -3533,7 +3533,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
ret,
);
}
-Operator::I64ExtendSI32 => {
+Operator::I64ExtendI32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
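The two hunks above only rename the extension operators (`I64ExtendUI32` and `I64ExtendSI32` become `I64ExtendI32U` and `I64ExtendI32S`); their semantics are unchanged. As a quick host-side reminder of those semantics (illustrative only, not the backend's actual lowering):

```rust
/// i64.extend_i32_u: zero-extend the 32-bit value to 64 bits.
fn i64_extend_i32_u(x: i32) -> i64 {
    (x as u32) as i64
}

/// i64.extend_i32_s: sign-extend the 32-bit value to 64 bits.
fn i64_extend_i32_s(x: i32) -> i64 {
    x as i64
}
```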
@@ -4666,7 +4666,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I32TruncUF32 => {
+Operator::I32TruncF32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -4725,7 +4725,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I32TruncUSatF32 => {
+Operator::I32TruncSatF32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -4776,7 +4776,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I32TruncSF32 => {
+Operator::I32TruncF32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -4835,7 +4835,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}
}
-Operator::I32TruncSSatF32 => {
+Operator::I32TruncSatF32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -4893,7 +4893,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I64TruncSF32 => {
+Operator::I64TruncF32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -4952,7 +4952,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I64TruncSSatF32 => {
+Operator::I64TruncSatF32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5010,7 +5010,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I64TruncUF32 => {
+Operator::I64TruncF32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5093,7 +5093,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}
}
-Operator::I64TruncUSatF32 => {
+Operator::I64TruncSatF32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5170,7 +5170,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I32TruncUF64 => {
+Operator::I32TruncF64U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5230,7 +5230,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I32TruncUSatF64 => {
+Operator::I32TruncSatF64U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5282,7 +5282,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I32TruncSF64 => {
+Operator::I32TruncF64S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5347,7 +5347,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I32TruncSSatF64 => {
+Operator::I32TruncSatF64S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5410,7 +5410,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I64TruncSF64 => {
+Operator::I64TruncF64S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5470,7 +5470,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I64TruncSSatF64 => {
+Operator::I64TruncSatF64S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5528,7 +5528,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::I64TruncUF64 => {
+Operator::I64TruncF64U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5612,7 +5612,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::I64TruncUSatF64 => {
+Operator::I64TruncSatF64U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
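The renames above cover both the trapping truncations (`I32TruncF32U`, `I64TruncF64S`, and so on) and their saturating `TruncSat*` counterparts. The distinction is semantic: the trapping form traps on NaN or out-of-range inputs, while the saturating form never traps, clamping out-of-range values and mapping NaN to zero. A host-side sketch of the two behaviours for the signed i32/f32 case follows (illustrative only; the backend emits machine code for these operators rather than calling Rust).

```rust
/// i32.trunc_f32_s: trap (modelled here as Err) on NaN or when the truncated
/// value does not fit in i32.
fn i32_trunc_f32_s(x: f32) -> Result<i32, &'static str> {
    if x.is_nan() {
        return Err("invalid conversion to integer");
    }
    let t = x.trunc();
    if t < -2_147_483_648.0 || t >= 2_147_483_648.0 {
        return Err("integer overflow");
    }
    Ok(t as i32)
}

/// i32.trunc_sat_f32_s: never traps; NaN becomes 0 and out-of-range values clamp.
/// Rust's saturating float-to-int `as` cast already has exactly these semantics.
fn i32_trunc_sat_f32_s(x: f32) -> i32 {
    x as i32
}
```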
@@ -5689,7 +5689,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_gpr(tmp_out);
}

-Operator::F32ConvertSI32 => {
+Operator::F32ConvertI32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5733,7 +5733,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_xmm(tmp_out);
}
}
-Operator::F32ConvertUI32 => {
+Operator::F32ConvertI32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5776,7 +5776,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_xmm(tmp_out);
}
}
-Operator::F32ConvertSI64 => {
+Operator::F32ConvertI64S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5819,7 +5819,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_xmm(tmp_out);
}
}
-Operator::F32ConvertUI64 => {
+Operator::F32ConvertI64U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5879,7 +5879,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}

-Operator::F64ConvertSI32 => {
+Operator::F64ConvertI32S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5923,7 +5923,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_xmm(tmp_out);
}
}
-Operator::F64ConvertUI32 => {
+Operator::F64ConvertI32U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -5967,7 +5967,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_xmm(tmp_out);
}
}
-Operator::F64ConvertSI64 => {
+Operator::F64ConvertI64S => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -6011,7 +6011,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
self.machine.release_temp_xmm(tmp_out);
}
}
-Operator::F64ConvertUI64 => {
+Operator::F64ConvertI64U => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
@@ -7466,7 +7466,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}
}
-Operator::Fence { flags: _ } => {
+Operator::AtomicFence { flags: _ } => {
// Fence is a nop.
//
// Fence was added to preserve information about fences from
@@ -7984,7 +7984,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I32AtomicRmw8UAdd { ref memarg } => {
+Operator::I32AtomicRmw8AddU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8015,7 +8015,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I32AtomicRmw16UAdd { ref memarg } => {
+Operator::I32AtomicRmw16AddU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8050,7 +8050,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw8UAdd { ref memarg } => {
+Operator::I64AtomicRmw8AddU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8081,7 +8081,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw16UAdd { ref memarg } => {
+Operator::I64AtomicRmw16AddU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8116,7 +8116,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw32UAdd { ref memarg } => {
+Operator::I64AtomicRmw32AddU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8223,7 +8223,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I32AtomicRmw8USub { ref memarg } => {
+Operator::I32AtomicRmw8SubU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8255,7 +8255,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I32AtomicRmw16USub { ref memarg } => {
+Operator::I32AtomicRmw16SubU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8291,7 +8291,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw8USub { ref memarg } => {
+Operator::I64AtomicRmw8SubU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8323,7 +8323,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw16USub { ref memarg } => {
+Operator::I64AtomicRmw16SubU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8359,7 +8359,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw32USub { ref memarg } => {
+Operator::I64AtomicRmw32SubU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8453,7 +8453,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I32AtomicRmw8UAnd { ref memarg } => {
+Operator::I32AtomicRmw8AndU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8482,7 +8482,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I32AtomicRmw16UAnd { ref memarg } => {
+Operator::I32AtomicRmw16AndU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8511,7 +8511,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw8UAnd { ref memarg } => {
+Operator::I64AtomicRmw8AndU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8540,7 +8540,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw16UAnd { ref memarg } => {
+Operator::I64AtomicRmw16AndU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8569,7 +8569,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw32UAnd { ref memarg } => {
+Operator::I64AtomicRmw32AndU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8656,7 +8656,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I32AtomicRmw8UOr { ref memarg } => {
+Operator::I32AtomicRmw8OrU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8685,7 +8685,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I32AtomicRmw16UOr { ref memarg } => {
+Operator::I32AtomicRmw16OrU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8714,7 +8714,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw8UOr { ref memarg } => {
+Operator::I64AtomicRmw8OrU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8743,7 +8743,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw16UOr { ref memarg } => {
+Operator::I64AtomicRmw16OrU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8772,7 +8772,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw32UOr { ref memarg } => {
+Operator::I64AtomicRmw32OrU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8859,7 +8859,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I32AtomicRmw8UXor { ref memarg } => {
+Operator::I32AtomicRmw8XorU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8888,7 +8888,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I32AtomicRmw16UXor { ref memarg } => {
+Operator::I32AtomicRmw16XorU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8917,7 +8917,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw8UXor { ref memarg } => {
+Operator::I64AtomicRmw8XorU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8946,7 +8946,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw16UXor { ref memarg } => {
+Operator::I64AtomicRmw16XorU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -8975,7 +8975,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
},
)?;
}
-Operator::I64AtomicRmw32UXor { ref memarg } => {
+Operator::I64AtomicRmw32XorU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -9066,7 +9066,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I32AtomicRmw8UXchg { ref memarg } => {
+Operator::I32AtomicRmw8XchgU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -9097,7 +9097,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I32AtomicRmw16UXchg { ref memarg } => {
+Operator::I32AtomicRmw16XchgU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -9128,7 +9128,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw8UXchg { ref memarg } => {
+Operator::I64AtomicRmw8XchgU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -9159,7 +9159,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw16UXchg { ref memarg } => {
+Operator::I64AtomicRmw16XchgU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -9190,7 +9190,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_mov(Size::S64, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
-Operator::I64AtomicRmw32UXchg { ref memarg } => {
+Operator::I64AtomicRmw32XchgU { ref memarg } => {
let loc =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
@@ -9321,7 +9321,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_pop(Size::S64, Location::GPR(value));
self.machine.release_temp_gpr(compare);
}
-Operator::I32AtomicRmw8UCmpxchg { ref memarg } => {
+Operator::I32AtomicRmw8CmpxchgU { ref memarg } => {
let new =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let cmp =
@@ -9371,7 +9371,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_pop(Size::S64, Location::GPR(value));
self.machine.release_temp_gpr(compare);
}
-Operator::I32AtomicRmw16UCmpxchg { ref memarg } => {
+Operator::I32AtomicRmw16CmpxchgU { ref memarg } => {
let new =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let cmp =
@@ -9421,7 +9421,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_pop(Size::S64, Location::GPR(value));
self.machine.release_temp_gpr(compare);
}
-Operator::I64AtomicRmw8UCmpxchg { ref memarg } => {
+Operator::I64AtomicRmw8CmpxchgU { ref memarg } => {
let new =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let cmp =
@@ -9471,7 +9471,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_pop(Size::S64, Location::GPR(value));
self.machine.release_temp_gpr(compare);
}
-Operator::I64AtomicRmw16UCmpxchg { ref memarg } => {
+Operator::I64AtomicRmw16CmpxchgU { ref memarg } => {
let new =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let cmp =
@@ -9521,7 +9521,7 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a.emit_pop(Size::S64, Location::GPR(value));
self.machine.release_temp_gpr(compare);
}
-Operator::I64AtomicRmw32UCmpxchg { ref memarg } => {
+Operator::I64AtomicRmw32CmpxchgU { ref memarg } => {
let new =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let cmp =
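All of these atomic hunks follow the same renaming pattern (`I64AtomicRmw8UAdd` becomes `I64AtomicRmw8AddU`, `I32AtomicRmw16UCmpxchg` becomes `I32AtomicRmw16CmpxchgU`, and so on). The trailing `U` marks the narrow, zero-extending forms: the read-modify-write acts on an 8-, 16- or 32-bit cell and the old value comes back zero-extended, which is why the LLVM hunks earlier zero-extend the old value with `build_int_z_extend` before pushing it. A host-side sketch of that semantics for one representative operator (illustrative only; wasm atomics are sequentially consistent, hence `SeqCst`):

```rust
use std::sync::atomic::{AtomicU8, Ordering};

/// i64.atomic.rmw8.add_u: the RMW happens on an 8-bit cell, the operand is
/// wrapped to 8 bits, and the old value is returned zero-extended to 64 bits.
fn i64_atomic_rmw8_add_u(cell: &AtomicU8, operand: u64) -> u64 {
    let old = cell.fetch_add(operand as u8, Ordering::SeqCst);
    old as u64
}
```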