Initial implementation of atomic load/store and i32 atomic rmw add.

Nick Lewycky
2019-08-28 17:23:26 -07:00
parent 38078173d3
commit 98f35ef84a
2 changed files with 612 additions and 41 deletions
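
For orientation: the operators implemented here have the same semantics as Rust's std::sync::atomic operations with sequentially consistent ordering. A minimal sketch of the host-level equivalents (illustrative only, not code from this commit):

use std::sync::atomic::{AtomicU32, Ordering};

// Host-language analogues of the Wasm operators lowered below.
fn analogues(cell: &AtomicU32) -> u32 {
    let v = cell.load(Ordering::SeqCst); // i32.atomic.load
    cell.store(v, Ordering::SeqCst); // i32.atomic.store (emitted as xchg)
    cell.fetch_add(1, Ordering::SeqCst) // i32.atomic.rmw.add (emitted as lock xadd)
}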

lib/singlepass-backend/src/codegen_x64.rs

@ -34,7 +34,7 @@ use wasmer_runtime_core::{
},
vm::{self, LocalGlobal, LocalTable, INTERNALS_SIZE},
};
- use wasmparser::{Operator, Type as WpType, TypeOrFuncType as WpTypeOrFuncType};
+ use wasmparser::{MemoryImmediate, Operator, Type as WpType, TypeOrFuncType as WpTypeOrFuncType};
lazy_static! {
/// Performs a System V call to `target` with [stack_top..stack_base] as the argument list, from right to left.
@ -1465,7 +1465,8 @@ impl X64FunctionCode {
a: &mut Assembler,
m: &mut Machine,
addr: Location,
- offset: usize,
+ memarg: &MemoryImmediate,
+ check_alignment: bool,
value_size: usize,
cb: F,
) {
@ -1487,7 +1488,6 @@ impl X64FunctionCode {
let tmp_addr = m.acquire_temp_gpr().unwrap();
let tmp_base = m.acquire_temp_gpr().unwrap();
- let tmp_bound = m.acquire_temp_gpr().unwrap();
// Load base into temporary register.
a.emit_mov(
@ -1500,6 +1500,8 @@ impl X64FunctionCode {
);
if need_check {
+ let tmp_bound = m.acquire_temp_gpr().unwrap();
a.emit_mov(
Size::S64,
Location::Memory(
@ -1513,7 +1515,7 @@ impl X64FunctionCode {
a.emit_mov(Size::S32, addr, Location::GPR(tmp_addr));
// This branch is used for emitting "faster" code for the special case of (offset + value_size) not exceeding u32 range.
- match (offset as u32).checked_add(value_size as u32) {
+ match (memarg.offset as u32).checked_add(value_size as u32) {
Some(0) => {}
Some(x) => {
a.emit_add(Size::S64, Location::Imm32(x), Location::GPR(tmp_addr));
@ -1521,7 +1523,7 @@ impl X64FunctionCode {
None => {
a.emit_add(
Size::S64,
- Location::Imm32(offset as u32),
+ Location::Imm32(memarg.offset as u32),
Location::GPR(tmp_addr),
);
a.emit_add(
@ -1536,22 +1538,41 @@ impl X64FunctionCode {
a.emit_add(Size::S64, Location::GPR(tmp_base), Location::GPR(tmp_addr));
a.emit_cmp(Size::S64, Location::GPR(tmp_bound), Location::GPR(tmp_addr));
a.emit_conditional_trap(Condition::Above);
- }
- m.release_temp_gpr(tmp_bound);
+ m.release_temp_gpr(tmp_bound);
+ }
// Calculates the real address, and loads from it.
a.emit_mov(Size::S32, addr, Location::GPR(tmp_addr));
- if offset != 0 {
+ if memarg.offset != 0 {
a.emit_add(
Size::S64,
- Location::Imm32(offset as u32),
+ Location::Imm32(memarg.offset as u32),
Location::GPR(tmp_addr),
);
}
a.emit_add(Size::S64, Location::GPR(tmp_base), Location::GPR(tmp_addr));
m.release_temp_gpr(tmp_base);
let align = match memarg.flags & 3 {
0 => 1,
1 => 2,
2 => 4,
3 => 8,
_ => unreachable!("this match is fully covered"),
};
if check_alignment && align != 1 {
let tmp_aligncheck = m.acquire_temp_gpr().unwrap();
//let tmp_mask = m.acquire_temp_gpr().unwrap();
a.emit_mov(Size::S32, Location::GPR(tmp_addr), Location::GPR(tmp_aligncheck));
//a.emit_mov(Size::S64, Location::Imm64(align - 1), Location::GPR(tmp_mask));
//a.emit_and(Size::S64, Location::GPR(tmp_mask), Location::GPR(tmp_aligncheck));
a.emit_and(Size::S64, Location::Imm32(align - 1), Location::GPR(tmp_aligncheck));
a.emit_conditional_trap(Condition::NotEqual);
//m.release_temp_gpr(tmp_mask);
m.release_temp_gpr(tmp_aligncheck);
}
cb(a, m, tmp_addr);
m.release_temp_gpr(tmp_addr);
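
Taken together, emit_memory_op now computes the effective address, optionally bounds-checks it, and (new in this commit) optionally traps on misalignment, with the natural alignment decoded from the low two bits of memarg.flags. A schematic model of those checks in plain Rust; the names model_memory_op, base, and bound are hypothetical, not from the commit:

// Sketch of the address computation and checks emit_memory_op compiles.
fn model_memory_op(
    base: u64,            // linear memory base
    bound: u64,           // base + memory size
    addr: u32,            // dynamic address operand
    memarg_offset: u32,
    memarg_flags: u32,
    check_alignment: bool,
    value_size: u64,
) -> Result<u64, &'static str> {
    // The fast path folds offset + value_size into one immediate when the
    // sum fits in u32; otherwise two adds are emitted (the None branch above).
    let end = base + addr as u64 + memarg_offset as u64 + value_size;
    if end > bound {
        return Err("heap out of bounds"); // emit_conditional_trap(Condition::Above)
    }
    let ea = base + addr as u64 + memarg_offset as u64;
    let align = 1u64 << (memarg_flags & 3); // 0..=3 encode 1, 2, 4, 8 bytes
    if check_alignment && align != 1 && ea & (align - 1) != 0 {
        return Err("misaligned atomic"); // emit_conditional_trap(Condition::NotEqual)
    }
    Ok(ea)
}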
@ -4147,7 +4168,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4177,7 +4199,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4207,7 +4230,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
1,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4238,7 +4262,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
1,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4269,7 +4294,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
2,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4300,7 +4326,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
2,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4327,7 +4354,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4353,7 +4381,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4379,7 +4408,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
1,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4405,7 +4435,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
2,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4435,7 +4466,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
8,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4465,7 +4497,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
8,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4495,7 +4528,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
1,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4526,7 +4560,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
1,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4557,7 +4592,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
2,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4588,7 +4624,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
2,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4619,7 +4656,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
match ret {
@ -4660,7 +4698,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
@ -4687,7 +4726,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
8,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4713,7 +4753,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
8,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4739,7 +4780,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
1,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4765,7 +4807,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
2,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4791,7 +4834,8 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
a,
&mut self.machine,
target_addr,
- memarg.offset as usize,
+ memarg,
+ false,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
@ -4980,6 +5024,471 @@ impl FunctionCodeGenerator<CodegenError> for X64FunctionCode {
}
}
}
Operator::Fence { flags: _ } => {
// Fence is a no-op.
//
// The fence instruction was added to preserve information about
// fences in the source language. If Wasm later extends its memory
// model and we hadn't recorded where those fences were, the
// compiled code could exhibit data races that were not present in
// the original source program.
}
Operator::I32AtomicLoad { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_mov,
Size::S32,
Location::Memory(addr, 0),
ret,
);
},
);
}
Operator::I32AtomicLoad8U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
1,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
a,
m,
Assembler::emit_movzx,
Size::S8,
Location::Memory(addr, 0),
Size::S32,
ret,
);
},
);
}
Operator::I32AtomicLoad16U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
2,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
a,
m,
Assembler::emit_movzx,
Size::S16,
Location::Memory(addr, 0),
Size::S32,
ret,
);
},
);
}
Operator::I32AtomicStore { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S32,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I32AtomicStore8 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
1,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S8,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I32AtomicStore16 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
2,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S16,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I64AtomicLoad { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
8,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_mov,
Size::S64,
Location::Memory(addr, 0),
ret,
);
},
);
}
Operator::I64AtomicLoad8U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
1,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
a,
m,
Assembler::emit_movzx,
Size::S8,
Location::Memory(addr, 0),
Size::S64,
ret,
);
},
);
}
Operator::I64AtomicLoad16U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
2,
|a, m, addr| {
Self::emit_relaxed_zx_sx(
a,
m,
Assembler::emit_movzx,
Size::S16,
Location::Memory(addr, 0),
Size::S64,
ret,
);
},
);
}
Operator::I64AtomicLoad32U { ref memarg } => {
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
4,
|a, m, addr| {
match ret {
Location::GPR(_) => {}
Location::Memory(base, offset) => {
a.emit_mov(
Size::S32,
Location::Imm32(0),
Location::Memory(base, offset + 4),
); // clear upper bits
}
_ => unreachable!(),
}
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_mov,
Size::S32,
Location::Memory(addr, 0),
ret,
);
},
);
}
Operator::I64AtomicStore { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
8,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S64,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I64AtomicStore8 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
1,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S8,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I64AtomicStore16 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
2,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S16,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I64AtomicStore32 { ref memarg } => {
let target_value =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target_addr =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target_addr,
memarg,
true,
4,
|a, m, addr| {
Self::emit_relaxed_binop(
a,
m,
Assembler::emit_xchg,
Size::S32,
target_value,
Location::Memory(addr, 0),
);
},
);
}
Operator::I32AtomicRmwAdd { ref memarg } => {
let loc = get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let target =
get_location_released(a, &mut self.machine, self.value_stack.pop().unwrap());
let ret = self.machine.acquire_locations(
a,
&[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))],
false,
)[0];
self.value_stack.push(ret);
let value = self.machine.acquire_temp_gpr().unwrap();
a.emit_mov(Size::S32, loc, Location::GPR(value));
Self::emit_memory_op(
module_info,
&self.config,
a,
&mut self.machine,
target,
memarg,
true,
4,
|a, _m, addr| {
a.emit_lock_xadd(Size::S32, Location::GPR(value), Location::Memory(addr, 0))
}
);
a.emit_mov(Size::S32, Location::GPR(value), ret);
self.machine.release_temp_gpr(value);
}
_ => {
return Err(CodegenError {
message: format!("not yet implemented: {:?}", op),

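A note on the store handlers above: atomic stores are emitted with emit_xchg rather than a plain mov because, on x86-64, xchg with a memory operand carries an implicit lock prefix, making it both an atomic store and a full barrier. An illustrative inline-assembly equivalent of the 32-bit case (a sketch, not code from the commit):

use std::arch::asm;

// Sequentially consistent 32-bit store via xchg, mirroring what the
// I32AtomicStore handler emits. x86-64 only; `ptr` must be valid and
// 4-byte aligned.
unsafe fn atomic_store_u32(ptr: *mut u32, mut val: u32) {
    asm!(
        "xchg [{p}], {v:e}", // implicit lock: atomic store + full fence
        p = in(reg) ptr,
        v = inout(reg) val,  // the old memory value comes back here
    );
}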
lib/singlepass-backend/src/emitter_x64.rs

@ -94,6 +94,8 @@ pub trait Emitter {
fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location);
fn emit_movzx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location);
fn emit_movsx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location);
fn emit_xchg(&mut self, sz: Size, src: Location, dst: Location);
fn emit_lock_xadd(&mut self, sz: Size, src: Location, dst: Location);
fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR);
fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR);
@ -562,7 +564,7 @@ impl Emitter for Assembler {
(Size::S64, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; lea Rq(dst as u8), [Rq(src as u8) + disp]);
}
- _ => unreachable!(),
+ _ => panic!("LEA {:?} {:?} {:?}", sz, src, dst),
}
}
fn emit_lea_label(&mut self, label: Self::Label, dst: Location) {
@ -570,7 +572,7 @@ impl Emitter for Assembler {
Location::GPR(x) => {
dynasm!(self ; lea Rq(x as u8), [=>label]);
}
- _ => unreachable!(),
+ _ => panic!("LEA label={:?} {:?}", label, dst),
}
}
fn emit_cdq(&mut self) {
@ -602,7 +604,7 @@ impl Emitter for Assembler {
match loc {
Location::GPR(x) => dynasm!(self ; jmp Rq(x as u8)),
Location::Memory(base, disp) => dynasm!(self ; jmp QWORD [Rq(base as u8) + disp]),
- _ => unreachable!(),
+ _ => panic!("JMP {:?}", loc),
}
}
fn emit_conditional_trap(&mut self, condition: Condition) {
@ -634,7 +636,7 @@ impl Emitter for Assembler {
Condition::Equal => dynasm!(self ; sete Rb(dst as u8)),
Condition::NotEqual => dynasm!(self ; setne Rb(dst as u8)),
Condition::Signed => dynasm!(self ; sets Rb(dst as u8)),
- _ => unreachable!(),
+ _ => panic!("SET {:?} {:?}", condition, dst),
}
}
fn emit_push(&mut self, sz: Size, src: Location) {
@ -644,7 +646,7 @@ impl Emitter for Assembler {
(Size::S64, Location::Memory(src, disp)) => {
dynasm!(self ; push QWORD [Rq(src as u8) + disp])
}
_ => panic!("push {:?} {:?}", sz, src),
_ => panic!("PUSH {:?} {:?}", sz, src),
}
}
fn emit_pop(&mut self, sz: Size, dst: Location) {
@ -653,12 +655,12 @@ impl Emitter for Assembler {
(Size::S64, Location::Memory(dst, disp)) => {
dynasm!(self ; pop QWORD [Rq(dst as u8) + disp])
}
_ => panic!("pop {:?} {:?}", sz, dst),
_ => panic!("POP {:?} {:?}", sz, dst),
}
}
fn emit_cmp(&mut self, sz: Size, left: Location, right: Location) {
binop_all_nofp!(cmp, self, sz, left, right, {
panic!("{:?} {:?} {:?}", sz, left, right);
panic!("CMP {:?} {:?} {:?}", sz, left, right);
});
}
fn emit_add(&mut self, sz: Size, src: Location, dst: Location) {
@ -743,7 +745,7 @@ impl Emitter for Assembler {
(Size::S16, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movzx Rq(dst as u8), WORD [Rq(src as u8) + disp]);
}
- _ => unreachable!(),
+ _ => panic!("MOVZX {:?} {:?} {:?} {:?}", sz_src, src, sz_dst, dst),
}
}
fn emit_movsx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location) {
@ -778,7 +780,67 @@ impl Emitter for Assembler {
(Size::S32, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), DWORD [Rq(src as u8) + disp]);
}
- _ => unreachable!(),
+ _ => panic!("MOVSX {:?} {:?} {:?} {:?}", sz_src, src, sz_dst, dst),
}
}
fn emit_xchg(&mut self, sz: Size, src: Location, dst: Location) {
match (sz, src, dst) {
(Size::S8, Location::GPR(src), Location::GPR(dst)) => {
dynasm!(self ; xchg Rb(dst as u8), Rb(src as u8));
}
(Size::S16, Location::GPR(src), Location::GPR(dst)) => {
dynasm!(self ; xchg Rw(dst as u8), Rw(src as u8));
}
(Size::S32, Location::GPR(src), Location::GPR(dst)) => {
dynasm!(self ; xchg Rd(dst as u8), Rd(src as u8));
}
(Size::S64, Location::GPR(src), Location::GPR(dst)) => {
dynasm!(self ; xchg Rq(dst as u8), Rq(src as u8));
}
(Size::S8, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; xchg Rb(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S8, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; xchg [Rq(dst as u8) + disp], Rb(src as u8));
}
(Size::S16, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; xchg Rw(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S16, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; xchg [Rq(dst as u8) + disp], Rw(src as u8));
}
(Size::S32, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; xchg Rd(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S32, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; xchg [Rq(dst as u8) + disp], Rd(src as u8));
}
(Size::S64, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; xchg Rq(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S64, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; xchg [Rq(dst as u8) + disp], Rq(src as u8));
}
_ => panic!("XCHG {:?} {:?} {:?}", sz, src, dst),
}
}
fn emit_lock_xadd(&mut self, sz: Size, src: Location, dst: Location) {
match (sz, src, dst) {
(Size::S8, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; lock xadd [Rq(dst as u8) + disp], Rb(src as u8));
}
(Size::S16, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; lock xadd [Rq(dst as u8) + disp], Rw(src as u8));
}
(Size::S32, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; lock xadd [Rq(dst as u8) + disp], Rd(src as u8));
}
(Size::S64, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; lock xadd [Rq(dst as u8) + disp], Rq(src as u8));
}
_ => panic!("LOCK XADD {:?} {:?} {:?}", sz, src, dst),
}
}
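
Finally, an illustrative inline-assembly equivalent of the Size::S32 arm of emit_lock_xadd (a sketch under the same caveats as above): lock xadd atomically adds the register into memory and leaves the old memory value in the register, which is exactly the old-value-returning shape the I32AtomicRmwAdd handler needs.

use std::arch::asm;

// Atomic fetch-and-add via `lock xadd`, as emitted for
// Operator::I32AtomicRmwAdd. Returns the value previously in memory.
// x86-64 only; `ptr` must be valid and 4-byte aligned.
unsafe fn fetch_add_u32(ptr: *mut u32, mut val: u32) -> u32 {
    asm!(
        "lock xadd [{p}], {v:e}",
        p = in(reg) ptr,
        v = inout(reg) val, // in: addend; out: previous value
    );
    val
}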