Renamed dynasm backend to singlepass

This commit is contained in:
Syrus
2019-04-11 12:44:03 -07:00
parent 1f06e90729
commit d67bfdb2c5
24 changed files with 100 additions and 76 deletions

View File

@ -0,0 +1,33 @@
use wasmer_runtime_core::{
backend::{FuncResolver, ProtectedCaller},
module::ModuleInfo,
structures::Map,
types::{FuncIndex, FuncSig, SigIndex},
};
use wasmparser::{Operator, Type as WpType};
/// Backend-agnostic driver interface for compiling a whole WebAssembly module.
///
/// `FCG` generates code for individual function bodies, and `finalize`
/// produces the backend's protected caller (`PC`) and function resolver (`FR`).
pub trait ModuleCodeGenerator<FCG: FunctionCodeGenerator, PC: ProtectedCaller, FR: FuncResolver> {
/// Checks backend-specific preconditions against the module before
/// code generation starts.
fn check_precondition(&mut self, module_info: &ModuleInfo) -> Result<(), CodegenError>;
/// Returns the code generator for the next function body to be compiled.
fn next_function(&mut self) -> Result<&mut FCG, CodegenError>;
/// Consumes the generator and produces the final executable artifacts.
fn finalize(self, module_info: &ModuleInfo) -> Result<(PC, FR), CodegenError>;
/// Feeds the module's function signatures, keyed by signature index.
fn feed_signatures(&mut self, signatures: Map<SigIndex, FuncSig>) -> Result<(), CodegenError>;
/// Feeds the function-index -> signature-index association table.
fn feed_function_signatures(
&mut self,
assoc: Map<FuncIndex, SigIndex>,
) -> Result<(), CodegenError>;
/// Notifies the backend of an imported function.
/// NOTE(review): presumably called once per import, in order — confirm at call site.
fn feed_import_function(&mut self) -> Result<(), CodegenError>;
}
/// Per-function code generation interface.
///
/// The driver feeds the function's signature (returns, params), its locals,
/// then each body opcode in order, and finally calls `finalize`.
pub trait FunctionCodeGenerator {
/// Registers one return type of the function being compiled.
fn feed_return(&mut self, ty: WpType) -> Result<(), CodegenError>;
/// Registers one parameter type of the function being compiled.
fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError>;
/// Registers `n` consecutive locals of type `ty`.
fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError>;
/// Marks the end of the declarations; subsequent calls feed body opcodes.
fn begin_body(&mut self) -> Result<(), CodegenError>;
/// Translates a single WebAssembly operator into machine code.
fn feed_opcode(&mut self, op: Operator, module_info: &ModuleInfo) -> Result<(), CodegenError>;
/// Completes code generation for this function.
fn finalize(&mut self) -> Result<(), CodegenError>;
}
/// Error produced during code generation.
#[derive(Debug)]
pub struct CodegenError {
// Static message only: errors here describe compiler-internal conditions,
// so no owned/allocated payload is carried.
pub message: &'static str,
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,926 @@
use dynasmrt::{x64::Assembler, AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi};
/// x86-64 general-purpose registers.
///
/// The discriminants are used directly as dynasm register numbers
/// (e.g. `Rq(r as u8)` below), so the variant order must stay exactly
/// the hardware encoding order RAX=0 .. R15=15. Do not reorder.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum GPR {
RAX,
RCX,
RDX,
RBX,
RSP,
RBP,
RSI,
RDI,
R8,
R9,
R10,
R11,
R12,
R13,
R14,
R15,
}
/// SSE/AVX XMM registers.
///
/// Discriminants double as dynasm register numbers (`Rx(x as u8)`);
/// the order XMM0=0 .. XMM7=7 must not change.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum XMM {
XMM0,
XMM1,
XMM2,
XMM3,
XMM4,
XMM5,
XMM6,
XMM7,
}
/// An operand location: an immediate, a register, or a base+displacement
/// memory reference.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum Location {
Imm8(u8),
Imm32(u32),
Imm64(u64),
GPR(GPR),
XMM(XMM),
// Base register plus signed 32-bit displacement.
Memory(GPR, i32),
}
/// x86 condition codes used for conditional jumps, sets, and traps.
/// `None` means unconditional (see `emit_jmp` / `emit_conditional_trap`).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Condition {
None,
Above,
AboveEqual,
Below,
BelowEqual,
Greater,
GreaterEqual,
Less,
LessEqual,
Equal,
NotEqual,
Signed,
}
/// Operand width in bits (8/16/32/64); selects the register class and
/// memory-operand size qualifier in the emitters below.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Size {
S8,
S16,
S32,
S64,
}
/// Second-source operand for SSE/AVX instructions: either an XMM register
/// or a base+displacement memory reference.
#[derive(Copy, Clone, Debug)]
#[allow(dead_code)]
pub enum XMMOrMemory {
XMM(XMM),
Memory(GPR, i32),
}
/// Integer-source operand for int-to-float conversions: either a GPR
/// or a base+displacement memory reference.
#[derive(Copy, Clone, Debug)]
#[allow(dead_code)]
pub enum GPROrMemory {
GPR(GPR),
Memory(GPR, i32),
}
/// Abstract x86-64 instruction emitter.
///
/// Implemented below for `dynasmrt::x64::Assembler`; the trait exists so the
/// code generator can be written against a stable instruction-level API.
/// Unless noted otherwise, operand order follows Intel convention as
/// `(src, dst)` parameters: the result is written to `dst`.
pub trait Emitter {
type Label;
type Offset;
// --- labels and offsets ---
fn get_label(&mut self) -> Self::Label;
fn get_offset(&mut self) -> Self::Offset;
fn emit_label(&mut self, label: Self::Label);
// --- data movement ---
fn emit_mov(&mut self, sz: Size, src: Location, dst: Location);
fn emit_lea(&mut self, sz: Size, src: Location, dst: Location);
fn emit_lea_label(&mut self, label: Self::Label, dst: Location);
// Sign-extension of EAX/RAX into EDX/RDX (used before idiv).
fn emit_cdq(&mut self);
fn emit_cqo(&mut self);
fn emit_xor(&mut self, sz: Size, src: Location, dst: Location);
// --- control flow ---
fn emit_jmp(&mut self, condition: Condition, label: Self::Label);
fn emit_jmp_location(&mut self, loc: Location);
/// Emits a `ud2` trap taken when `condition` holds; fall-through otherwise.
fn emit_conditional_trap(&mut self, condition: Condition);
/// setcc into the low byte of `dst`.
fn emit_set(&mut self, condition: Condition, dst: GPR);
// --- stack ---
fn emit_push(&mut self, sz: Size, src: Location);
fn emit_pop(&mut self, sz: Size, dst: Location);
// --- integer ALU ---
fn emit_cmp(&mut self, sz: Size, left: Location, right: Location);
fn emit_add(&mut self, sz: Size, src: Location, dst: Location);
fn emit_sub(&mut self, sz: Size, src: Location, dst: Location);
fn emit_imul(&mut self, sz: Size, src: Location, dst: Location);
fn emit_imul_imm32_gpr64(&mut self, src: u32, dst: GPR);
// div/idiv use the implicit RDX:RAX dividend; only the divisor is passed.
fn emit_div(&mut self, sz: Size, divisor: Location);
fn emit_idiv(&mut self, sz: Size, divisor: Location);
// Shift count is either Imm8 or implicitly CL (see binop_shift!).
fn emit_shl(&mut self, sz: Size, src: Location, dst: Location);
fn emit_shr(&mut self, sz: Size, src: Location, dst: Location);
fn emit_sar(&mut self, sz: Size, src: Location, dst: Location);
fn emit_rol(&mut self, sz: Size, src: Location, dst: Location);
fn emit_ror(&mut self, sz: Size, src: Location, dst: Location);
fn emit_and(&mut self, sz: Size, src: Location, dst: Location);
fn emit_or(&mut self, sz: Size, src: Location, dst: Location);
// --- bit counting ---
fn emit_lzcnt(&mut self, sz: Size, src: Location, dst: Location);
fn emit_tzcnt(&mut self, sz: Size, src: Location, dst: Location);
fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location);
// --- widening moves ---
fn emit_movzx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location);
fn emit_movsx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location);
fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR);
fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR);
fn emit_cmovae_gpr_32(&mut self, src: GPR, dst: GPR);
fn emit_cmovae_gpr_64(&mut self, src: GPR, dst: GPR);
// --- scalar float (AVX three-operand: dst = op(src1, src2)) ---
fn emit_vaddss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vaddsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vsubss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vsubsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vmulss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vmulsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vdivss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vdivsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vmaxss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vmaxsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vminss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vminsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
// --- scalar float compares (vcmp*: result is an all-ones/zeros mask) ---
fn emit_vcmpeqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpeqsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpneqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpneqsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpltss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpltsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpless(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmplesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpgtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpgtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpgess(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcmpgesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vsqrtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vsqrtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
// --- rounding (one method per rounding-mode immediate) ---
fn emit_vroundss_nearest(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundss_floor(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundss_ceil(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundss_trunc(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundsd_nearest(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundsd_floor(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundsd_ceil(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vroundsd_trunc(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
// --- float<->float and float<->int conversions ---
fn emit_vcvtss2sd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_vcvtsd2ss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM);
fn emit_ucomiss(&mut self, src: XMMOrMemory, dst: XMM);
fn emit_ucomisd(&mut self, src: XMMOrMemory, dst: XMM);
fn emit_cvttss2si_32(&mut self, src: XMMOrMemory, dst: GPR);
fn emit_cvttss2si_64(&mut self, src: XMMOrMemory, dst: GPR);
fn emit_cvttsd2si_32(&mut self, src: XMMOrMemory, dst: GPR);
fn emit_cvttsd2si_64(&mut self, src: XMMOrMemory, dst: GPR);
fn emit_vcvtsi2ss_32(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM);
fn emit_vcvtsi2ss_64(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM);
fn emit_vcvtsi2sd_32(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM);
fn emit_vcvtsi2sd_64(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM);
// --- misc ---
/// `test reg, reg` — sets flags from a register against itself.
fn emit_test_gpr_64(&mut self, reg: GPR);
fn emit_ud2(&mut self);
fn emit_ret(&mut self);
fn emit_call_label(&mut self, label: Self::Label);
fn emit_call_location(&mut self, loc: Location);
}
// Emits a one-operand instruction on a 32/64-bit GPR; any other (size,
// location) combination falls through to the `$otherwise` block. The
// fallthrough pattern lets the unop_*/binop_* macros be chained.
macro_rules! unop_gpr {
($ins:ident, $assembler:tt, $sz:expr, $loc:expr, $otherwise:block) => {
match ($sz, $loc) {
(Size::S32, Location::GPR(loc)) => {
dynasm!($assembler ; $ins Rd(loc as u8));
},
(Size::S64, Location::GPR(loc)) => {
dynasm!($assembler ; $ins Rq(loc as u8));
},
_ => $otherwise
}
};
}
// Emits a one-operand instruction on a 32/64-bit memory operand
// (base register + displacement); falls through otherwise.
macro_rules! unop_mem {
($ins:ident, $assembler:tt, $sz:expr, $loc:expr, $otherwise:block) => {
match ($sz, $loc) {
(Size::S32, Location::Memory(loc, disp)) => {
dynasm!($assembler ; $ins DWORD [Rq(loc as u8) + disp] );
},
(Size::S64, Location::Memory(loc, disp)) => {
dynasm!($assembler ; $ins QWORD [Rq(loc as u8) + disp] );
},
_ => $otherwise
}
};
}
// Tries the GPR form first, then the memory form, then `$otherwise`.
macro_rules! unop_gpr_or_mem {
($ins:ident, $assembler:tt, $sz:expr, $loc:expr, $otherwise:block) => {
unop_gpr!($ins, $assembler, $sz, $loc, {
unop_mem!($ins, $assembler, $sz, $loc, $otherwise)
})
};
}
// Two-operand form: 32-bit immediate source into a GPR destination.
// For S64 the immediate is still imm32 (sign-extended by the CPU).
macro_rules! binop_imm32_gpr {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S32, Location::Imm32(src), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rd(dst as u8), src as i32); // IMM32_2GPR
},
(Size::S64, Location::Imm32(src), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rq(dst as u8), src as i32); // IMM32_2GPR
},
_ => $otherwise
}
};
}
// Two-operand form: 32-bit immediate source into a memory destination.
macro_rules! binop_imm32_mem {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S32, Location::Imm32(src), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins DWORD [Rq(dst as u8) + disp], src as i32);
},
(Size::S64, Location::Imm32(src), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins QWORD [Rq(dst as u8) + disp], src as i32);
},
_ => $otherwise
}
};
}
// Two-operand form: full 64-bit immediate into a GPR (S64 only; this is
// the mov r64, imm64 encoding).
macro_rules! binop_imm64_gpr {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S64, Location::Imm64(src), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rq(dst as u8), QWORD src as i64); // IMM64_2GPR
},
_ => $otherwise
}
};
}
// Two-operand form: GPR source into a GPR destination.
macro_rules! binop_gpr_gpr {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S32, Location::GPR(src), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rd(dst as u8), Rd(src as u8)); // GPR2GPR
},
(Size::S64, Location::GPR(src), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rq(dst as u8), Rq(src as u8)); // GPR2GPR
},
_ => $otherwise
}
};
}
// Two-operand form: GPR source into a memory destination.
macro_rules! binop_gpr_mem {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S32, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins [Rq(dst as u8) + disp], Rd(src as u8)); // GPR2MEM
},
(Size::S64, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins [Rq(dst as u8) + disp], Rq(src as u8)); // GPR2MEM
},
_ => $otherwise
}
};
}
// Two-operand form: memory source into a GPR destination.
macro_rules! binop_mem_gpr {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S32, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rd(dst as u8), [Rq(src as u8) + disp]); // MEM2GPR
},
(Size::S64, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rq(dst as u8), [Rq(src as u8) + disp]); // MEM2GPR
},
_ => $otherwise
}
};
}
// Tries every integer (non-floating-point) operand combination in turn:
// imm32->gpr, imm32->mem, gpr->gpr, gpr->mem, mem->gpr; `$otherwise`
// handles anything not covered (e.g. XMM or imm64 operands).
macro_rules! binop_all_nofp {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
binop_imm32_gpr!($ins, $assembler, $sz, $src, $dst, {
binop_imm32_mem!($ins, $assembler, $sz, $src, $dst, {
binop_gpr_gpr!($ins, $assembler, $sz, $src, $dst, {
binop_gpr_mem!($ins, $assembler, $sz, $src, $dst, {
binop_mem_gpr!($ins, $assembler, $sz, $src, $dst, $otherwise)
})
})
})
})
};
}
// Shift/rotate emitter. x86 only allows the shift count to be either the
// CL register or an 8-bit immediate, hence the restricted source patterns
// (GPR source must specifically be RCX).
macro_rules! binop_shift {
($ins:ident, $assembler:tt, $sz:expr, $src:expr, $dst:expr, $otherwise:block) => {
match ($sz, $src, $dst) {
(Size::S32, Location::GPR(GPR::RCX), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rd(dst as u8), cl);
},
(Size::S32, Location::GPR(GPR::RCX), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins DWORD [Rq(dst as u8) + disp], cl);
},
(Size::S32, Location::Imm8(imm), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rd(dst as u8), imm as i8);
},
(Size::S32, Location::Imm8(imm), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins DWORD [Rq(dst as u8) + disp], imm as i8);
},
(Size::S64, Location::GPR(GPR::RCX), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rq(dst as u8), cl);
},
(Size::S64, Location::GPR(GPR::RCX), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins QWORD [Rq(dst as u8) + disp], cl);
},
(Size::S64, Location::Imm8(imm), Location::GPR(dst)) => {
dynasm!($assembler ; $ins Rq(dst as u8), imm as i8);
},
(Size::S64, Location::Imm8(imm), Location::Memory(dst, disp)) => {
dynasm!($assembler ; $ins QWORD [Rq(dst as u8) + disp], imm as i8);
},
_ => $otherwise
}
}
}
// Emits a (conditional) jump to a dynamic label.
macro_rules! jmp_op {
($ins:ident, $assembler:tt, $label:ident) => {
dynasm!($assembler ; $ins =>$label);
}
}
// Emits an inline conditional trap: jump-if-condition over to a local
// `ud2`, otherwise skip past it. `$ins` is the jcc mnemonic; when the
// condition holds, execution hits `ud2` (illegal instruction), which the
// runtime's signal handler is expected to catch.
// NOTE(review): `>trap`/`>after` are dynasm local forward labels, so
// nesting/ordering with other local labels in the same assembler is safe
// only within this snippet — confirm if extended.
macro_rules! trap_op {
($ins:ident, $assembler:tt) => {
dynasm!($assembler
; $ins >trap
; jmp >after
; trap:
; ud2
; after:
);
}
}
// Generates an Emitter method for a three-operand AVX scalar instruction
// (dst = ins(src1, src2)). The per-variant match on `src1` works around a
// dynasm issue where dynamically-numbered registers in the middle operand
// position mis-encode AVX instructions, so `src1` is spelled as a literal
// xmm0..xmm7 operand instead of `Rx(src1 as u8)`.
macro_rules! avx_fn {
($ins:ident, $name:ident) => {
fn $name(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) {
// Dynasm bug: AVX instructions are not encoded correctly.
match src2 {
XMMOrMemory::XMM(x) => match src1 {
XMM::XMM0 => dynasm!(self ; $ins Rx((dst as u8)), xmm0, Rx((x as u8))),
XMM::XMM1 => dynasm!(self ; $ins Rx((dst as u8)), xmm1, Rx((x as u8))),
XMM::XMM2 => dynasm!(self ; $ins Rx((dst as u8)), xmm2, Rx((x as u8))),
XMM::XMM3 => dynasm!(self ; $ins Rx((dst as u8)), xmm3, Rx((x as u8))),
XMM::XMM4 => dynasm!(self ; $ins Rx((dst as u8)), xmm4, Rx((x as u8))),
XMM::XMM5 => dynasm!(self ; $ins Rx((dst as u8)), xmm5, Rx((x as u8))),
XMM::XMM6 => dynasm!(self ; $ins Rx((dst as u8)), xmm6, Rx((x as u8))),
XMM::XMM7 => dynasm!(self ; $ins Rx((dst as u8)), xmm7, Rx((x as u8))),
},
XMMOrMemory::Memory(base, disp) => match src1 {
XMM::XMM0 => dynasm!(self ; $ins Rx((dst as u8)), xmm0, [Rq((base as u8)) + disp]),
XMM::XMM1 => dynasm!(self ; $ins Rx((dst as u8)), xmm1, [Rq((base as u8)) + disp]),
XMM::XMM2 => dynasm!(self ; $ins Rx((dst as u8)), xmm2, [Rq((base as u8)) + disp]),
XMM::XMM3 => dynasm!(self ; $ins Rx((dst as u8)), xmm3, [Rq((base as u8)) + disp]),
XMM::XMM4 => dynasm!(self ; $ins Rx((dst as u8)), xmm4, [Rq((base as u8)) + disp]),
XMM::XMM5 => dynasm!(self ; $ins Rx((dst as u8)), xmm5, [Rq((base as u8)) + disp]),
XMM::XMM6 => dynasm!(self ; $ins Rx((dst as u8)), xmm6, [Rq((base as u8)) + disp]),
XMM::XMM7 => dynasm!(self ; $ins Rx((dst as u8)), xmm7, [Rq((base as u8)) + disp]),
},
}
}
}
}
// Generates an Emitter method for a 64-bit integer -> float AVX conversion
// (vcvtsi2ss/vcvtsi2sd with a 64-bit integer source). Same literal-xmm
// workaround for `src1` as in avx_fn! above.
macro_rules! avx_i2f_64_fn {
($ins:ident, $name:ident) => {
fn $name(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM) {
match src2 {
GPROrMemory::GPR(x) => match src1 {
XMM::XMM0 => dynasm!(self ; $ins Rx((dst as u8)), xmm0, Rq((x as u8))),
XMM::XMM1 => dynasm!(self ; $ins Rx((dst as u8)), xmm1, Rq((x as u8))),
XMM::XMM2 => dynasm!(self ; $ins Rx((dst as u8)), xmm2, Rq((x as u8))),
XMM::XMM3 => dynasm!(self ; $ins Rx((dst as u8)), xmm3, Rq((x as u8))),
XMM::XMM4 => dynasm!(self ; $ins Rx((dst as u8)), xmm4, Rq((x as u8))),
XMM::XMM5 => dynasm!(self ; $ins Rx((dst as u8)), xmm5, Rq((x as u8))),
XMM::XMM6 => dynasm!(self ; $ins Rx((dst as u8)), xmm6, Rq((x as u8))),
XMM::XMM7 => dynasm!(self ; $ins Rx((dst as u8)), xmm7, Rq((x as u8))),
},
GPROrMemory::Memory(base, disp) => match src1 {
XMM::XMM0 => dynasm!(self ; $ins Rx((dst as u8)), xmm0, QWORD [Rq((base as u8)) + disp]),
XMM::XMM1 => dynasm!(self ; $ins Rx((dst as u8)), xmm1, QWORD [Rq((base as u8)) + disp]),
XMM::XMM2 => dynasm!(self ; $ins Rx((dst as u8)), xmm2, QWORD [Rq((base as u8)) + disp]),
XMM::XMM3 => dynasm!(self ; $ins Rx((dst as u8)), xmm3, QWORD [Rq((base as u8)) + disp]),
XMM::XMM4 => dynasm!(self ; $ins Rx((dst as u8)), xmm4, QWORD [Rq((base as u8)) + disp]),
XMM::XMM5 => dynasm!(self ; $ins Rx((dst as u8)), xmm5, QWORD [Rq((base as u8)) + disp]),
XMM::XMM6 => dynasm!(self ; $ins Rx((dst as u8)), xmm6, QWORD [Rq((base as u8)) + disp]),
XMM::XMM7 => dynasm!(self ; $ins Rx((dst as u8)), xmm7, QWORD [Rq((base as u8)) + disp]),
},
}
}
}
}
// Generates an Emitter method for a 32-bit integer -> float AVX conversion;
// identical to avx_i2f_64_fn! except the integer source is 32-bit
// (Rd / DWORD instead of Rq / QWORD).
macro_rules! avx_i2f_32_fn {
($ins:ident, $name:ident) => {
fn $name(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM) {
match src2 {
GPROrMemory::GPR(x) => match src1 {
XMM::XMM0 => dynasm!(self ; $ins Rx((dst as u8)), xmm0, Rd((x as u8))),
XMM::XMM1 => dynasm!(self ; $ins Rx((dst as u8)), xmm1, Rd((x as u8))),
XMM::XMM2 => dynasm!(self ; $ins Rx((dst as u8)), xmm2, Rd((x as u8))),
XMM::XMM3 => dynasm!(self ; $ins Rx((dst as u8)), xmm3, Rd((x as u8))),
XMM::XMM4 => dynasm!(self ; $ins Rx((dst as u8)), xmm4, Rd((x as u8))),
XMM::XMM5 => dynasm!(self ; $ins Rx((dst as u8)), xmm5, Rd((x as u8))),
XMM::XMM6 => dynasm!(self ; $ins Rx((dst as u8)), xmm6, Rd((x as u8))),
XMM::XMM7 => dynasm!(self ; $ins Rx((dst as u8)), xmm7, Rd((x as u8))),
},
GPROrMemory::Memory(base, disp) => match src1 {
XMM::XMM0 => dynasm!(self ; $ins Rx((dst as u8)), xmm0, DWORD [Rq((base as u8)) + disp]),
XMM::XMM1 => dynasm!(self ; $ins Rx((dst as u8)), xmm1, DWORD [Rq((base as u8)) + disp]),
XMM::XMM2 => dynasm!(self ; $ins Rx((dst as u8)), xmm2, DWORD [Rq((base as u8)) + disp]),
XMM::XMM3 => dynasm!(self ; $ins Rx((dst as u8)), xmm3, DWORD [Rq((base as u8)) + disp]),
XMM::XMM4 => dynasm!(self ; $ins Rx((dst as u8)), xmm4, DWORD [Rq((base as u8)) + disp]),
XMM::XMM5 => dynasm!(self ; $ins Rx((dst as u8)), xmm5, DWORD [Rq((base as u8)) + disp]),
XMM::XMM6 => dynasm!(self ; $ins Rx((dst as u8)), xmm6, DWORD [Rq((base as u8)) + disp]),
XMM::XMM7 => dynasm!(self ; $ins Rx((dst as u8)), xmm7, DWORD [Rq((base as u8)) + disp]),
},
}
}
}
}
// Generates an Emitter method for vroundss/vroundsd with a fixed rounding
// mode immediate ($mode): 0 = nearest, 1 = floor, 2 = ceil, 3 = trunc
// (see the invocations below). No literal-xmm workaround here: `src1`
// is not in the affected middle operand position.
macro_rules! avx_round_fn {
($ins:ident, $name:ident, $mode:expr) => {
fn $name(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) {
match src2 {
XMMOrMemory::XMM(x) => dynasm!(self ; $ins Rx((dst as u8)), Rx((src1 as u8)), Rx((x as u8)), $mode),
XMMOrMemory::Memory(base, disp) => dynasm!(self ; $ins Rx((dst as u8)), Rx((src1 as u8)), [Rq((base as u8)) + disp], $mode),
}
}
}
}
impl Emitter for Assembler {
type Label = DynamicLabel;
type Offset = AssemblyOffset;
fn get_label(&mut self) -> DynamicLabel {
self.new_dynamic_label()
}
fn get_offset(&mut self) -> AssemblyOffset {
self.offset()
}
fn emit_label(&mut self, label: Self::Label) {
dynasm!(self ; => label);
}
fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) {
binop_all_nofp!(mov, self, sz, src, dst, {
binop_imm64_gpr!(mov, self, sz, src, dst, {
match (sz, src, dst) {
(Size::S8, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov [Rq(dst as u8) + disp], Rb(src as u8));
}
(Size::S8, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; mov Rb(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S8, Location::Imm32(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov BYTE [Rq(dst as u8) + disp], src as i8);
}
(Size::S16, Location::GPR(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov [Rq(dst as u8) + disp], Rw(src as u8));
}
(Size::S16, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; mov Rw(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S16, Location::Imm32(src), Location::Memory(dst, disp)) => {
dynasm!(self ; mov WORD [Rq(dst as u8) + disp], src as i16);
}
(Size::S32, Location::GPR(src), Location::XMM(dst)) => {
dynasm!(self ; movd Rx(dst as u8), Rd(src as u8));
}
(Size::S32, Location::XMM(src), Location::GPR(dst)) => {
dynasm!(self ; movd Rd(dst as u8), Rx(src as u8));
}
(Size::S32, Location::Memory(src, disp), Location::XMM(dst)) => {
dynasm!(self ; movd Rx(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S32, Location::XMM(src), Location::Memory(dst, disp)) => {
dynasm!(self ; movd [Rq(dst as u8) + disp], Rx(src as u8));
}
(Size::S64, Location::GPR(src), Location::XMM(dst)) => {
dynasm!(self ; movq Rx(dst as u8), Rq(src as u8));
}
(Size::S64, Location::XMM(src), Location::GPR(dst)) => {
dynasm!(self ; movq Rq(dst as u8), Rx(src as u8));
}
(Size::S64, Location::Memory(src, disp), Location::XMM(dst)) => {
dynasm!(self ; movq Rx(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S64, Location::XMM(src), Location::Memory(dst, disp)) => {
dynasm!(self ; movq [Rq(dst as u8) + disp], Rx(src as u8));
}
(_, Location::XMM(src), Location::XMM(dst)) => {
dynasm!(self ; movq Rx(dst as u8), Rx(src as u8));
}
_ => panic!("MOV {:?} {:?} {:?}", sz, src, dst),
}
})
});
}
fn emit_lea(&mut self, sz: Size, src: Location, dst: Location) {
match (sz, src, dst) {
(Size::S32, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; lea Rd(dst as u8), [Rq(src as u8) + disp]);
}
(Size::S64, Location::Memory(src, disp), Location::GPR(dst)) => {
dynasm!(self ; lea Rq(dst as u8), [Rq(src as u8) + disp]);
}
_ => unreachable!(),
}
}
fn emit_lea_label(&mut self, label: Self::Label, dst: Location) {
match dst {
Location::GPR(x) => {
dynasm!(self ; lea Rq(x as u8), [=>label]);
}
_ => unreachable!(),
}
}
fn emit_cdq(&mut self) {
dynasm!(self ; cdq);
}
fn emit_cqo(&mut self) {
dynasm!(self ; cqo);
}
fn emit_xor(&mut self, sz: Size, src: Location, dst: Location) {
binop_all_nofp!(xor, self, sz, src, dst, { unreachable!() });
}
fn emit_jmp(&mut self, condition: Condition, label: Self::Label) {
match condition {
Condition::None => jmp_op!(jmp, self, label),
Condition::Above => jmp_op!(ja, self, label),
Condition::AboveEqual => jmp_op!(jae, self, label),
Condition::Below => jmp_op!(jb, self, label),
Condition::BelowEqual => jmp_op!(jbe, self, label),
Condition::Greater => jmp_op!(jg, self, label),
Condition::GreaterEqual => jmp_op!(jge, self, label),
Condition::Less => jmp_op!(jl, self, label),
Condition::LessEqual => jmp_op!(jle, self, label),
Condition::Equal => jmp_op!(je, self, label),
Condition::NotEqual => jmp_op!(jne, self, label),
Condition::Signed => jmp_op!(js, self, label),
}
}
fn emit_jmp_location(&mut self, loc: Location) {
match loc {
Location::GPR(x) => dynasm!(self ; jmp Rq(x as u8)),
Location::Memory(base, disp) => dynasm!(self ; jmp QWORD [Rq(base as u8) + disp]),
_ => unreachable!(),
}
}
fn emit_conditional_trap(&mut self, condition: Condition) {
match condition {
Condition::None => trap_op!(jmp, self),
Condition::Above => trap_op!(ja, self),
Condition::AboveEqual => trap_op!(jae, self),
Condition::Below => trap_op!(jb, self),
Condition::BelowEqual => trap_op!(jbe, self),
Condition::Greater => trap_op!(jg, self),
Condition::GreaterEqual => trap_op!(jge, self),
Condition::Less => trap_op!(jl, self),
Condition::LessEqual => trap_op!(jle, self),
Condition::Equal => trap_op!(je, self),
Condition::NotEqual => trap_op!(jne, self),
Condition::Signed => trap_op!(js, self),
}
}
fn emit_set(&mut self, condition: Condition, dst: GPR) {
match condition {
Condition::Above => dynasm!(self ; seta Rb(dst as u8)),
Condition::AboveEqual => dynasm!(self ; setae Rb(dst as u8)),
Condition::Below => dynasm!(self ; setb Rb(dst as u8)),
Condition::BelowEqual => dynasm!(self ; setbe Rb(dst as u8)),
Condition::Greater => dynasm!(self ; setg Rb(dst as u8)),
Condition::GreaterEqual => dynasm!(self ; setge Rb(dst as u8)),
Condition::Less => dynasm!(self ; setl Rb(dst as u8)),
Condition::LessEqual => dynasm!(self ; setle Rb(dst as u8)),
Condition::Equal => dynasm!(self ; sete Rb(dst as u8)),
Condition::NotEqual => dynasm!(self ; setne Rb(dst as u8)),
Condition::Signed => dynasm!(self ; sets Rb(dst as u8)),
_ => unreachable!(),
}
}
fn emit_push(&mut self, sz: Size, src: Location) {
match (sz, src) {
(Size::S64, Location::Imm32(src)) => dynasm!(self ; push src as i32),
(Size::S64, Location::GPR(src)) => dynasm!(self ; push Rq(src as u8)),
(Size::S64, Location::Memory(src, disp)) => {
dynasm!(self ; push QWORD [Rq(src as u8) + disp])
}
_ => panic!("push {:?} {:?}", sz, src),
}
}
fn emit_pop(&mut self, sz: Size, dst: Location) {
match (sz, dst) {
(Size::S64, Location::GPR(dst)) => dynasm!(self ; pop Rq(dst as u8)),
(Size::S64, Location::Memory(dst, disp)) => {
dynasm!(self ; pop QWORD [Rq(dst as u8) + disp])
}
_ => panic!("pop {:?} {:?}", sz, dst),
}
}
fn emit_cmp(&mut self, sz: Size, left: Location, right: Location) {
binop_all_nofp!(cmp, self, sz, left, right, {
panic!("{:?} {:?} {:?}", sz, left, right);
});
}
fn emit_add(&mut self, sz: Size, src: Location, dst: Location) {
binop_all_nofp!(add, self, sz, src, dst, { unreachable!() });
}
fn emit_sub(&mut self, sz: Size, src: Location, dst: Location) {
binop_all_nofp!(sub, self, sz, src, dst, { unreachable!() });
}
fn emit_imul(&mut self, sz: Size, src: Location, dst: Location) {
binop_gpr_gpr!(imul, self, sz, src, dst, {
binop_mem_gpr!(imul, self, sz, src, dst, { unreachable!() })
});
}
fn emit_imul_imm32_gpr64(&mut self, src: u32, dst: GPR) {
dynasm!(self ; imul Rq(dst as u8), Rq(dst as u8), src as i32);
}
fn emit_div(&mut self, sz: Size, divisor: Location) {
unop_gpr_or_mem!(div, self, sz, divisor, { unreachable!() });
}
fn emit_idiv(&mut self, sz: Size, divisor: Location) {
unop_gpr_or_mem!(idiv, self, sz, divisor, { unreachable!() });
}
fn emit_shl(&mut self, sz: Size, src: Location, dst: Location) {
binop_shift!(shl, self, sz, src, dst, { unreachable!() });
}
fn emit_shr(&mut self, sz: Size, src: Location, dst: Location) {
binop_shift!(shr, self, sz, src, dst, { unreachable!() });
}
fn emit_sar(&mut self, sz: Size, src: Location, dst: Location) {
binop_shift!(sar, self, sz, src, dst, { unreachable!() });
}
fn emit_rol(&mut self, sz: Size, src: Location, dst: Location) {
binop_shift!(rol, self, sz, src, dst, { unreachable!() });
}
fn emit_ror(&mut self, sz: Size, src: Location, dst: Location) {
binop_shift!(ror, self, sz, src, dst, { unreachable!() });
}
fn emit_and(&mut self, sz: Size, src: Location, dst: Location) {
binop_all_nofp!(and, self, sz, src, dst, { unreachable!() });
}
fn emit_or(&mut self, sz: Size, src: Location, dst: Location) {
binop_all_nofp!(or, self, sz, src, dst, { unreachable!() });
}
fn emit_lzcnt(&mut self, sz: Size, src: Location, dst: Location) {
binop_gpr_gpr!(lzcnt, self, sz, src, dst, {
binop_mem_gpr!(lzcnt, self, sz, src, dst, { unreachable!() })
});
}
fn emit_tzcnt(&mut self, sz: Size, src: Location, dst: Location) {
binop_gpr_gpr!(tzcnt, self, sz, src, dst, {
binop_mem_gpr!(tzcnt, self, sz, src, dst, { unreachable!() })
});
}
fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location) {
binop_gpr_gpr!(popcnt, self, sz, src, dst, {
binop_mem_gpr!(popcnt, self, sz, src, dst, { unreachable!() })
});
}
fn emit_movzx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location) {
match (sz_src, src, sz_dst, dst) {
(Size::S8, Location::GPR(src), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movzx Rd(dst as u8), Rb(src as u8));
}
(Size::S16, Location::GPR(src), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movzx Rd(dst as u8), Rw(src as u8));
}
(Size::S8, Location::Memory(src, disp), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movzx Rd(dst as u8), BYTE [Rq(src as u8) + disp]);
}
(Size::S16, Location::Memory(src, disp), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movzx Rd(dst as u8), WORD [Rq(src as u8) + disp]);
}
(Size::S8, Location::GPR(src), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movzx Rq(dst as u8), Rb(src as u8));
}
(Size::S16, Location::GPR(src), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movzx Rq(dst as u8), Rw(src as u8));
}
(Size::S8, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movzx Rq(dst as u8), BYTE [Rq(src as u8) + disp]);
}
(Size::S16, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movzx Rq(dst as u8), WORD [Rq(src as u8) + disp]);
}
_ => unreachable!(),
}
}
fn emit_movsx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location) {
match (sz_src, src, sz_dst, dst) {
(Size::S8, Location::GPR(src), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movsx Rd(dst as u8), Rb(src as u8));
}
(Size::S16, Location::GPR(src), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movsx Rd(dst as u8), Rw(src as u8));
}
(Size::S8, Location::Memory(src, disp), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movsx Rd(dst as u8), BYTE [Rq(src as u8) + disp]);
}
(Size::S16, Location::Memory(src, disp), Size::S32, Location::GPR(dst)) => {
dynasm!(self ; movsx Rd(dst as u8), WORD [Rq(src as u8) + disp]);
}
(Size::S8, Location::GPR(src), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), Rb(src as u8));
}
(Size::S16, Location::GPR(src), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), Rw(src as u8));
}
(Size::S32, Location::GPR(src), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), Rd(src as u8));
}
(Size::S8, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), BYTE [Rq(src as u8) + disp]);
}
(Size::S16, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), WORD [Rq(src as u8) + disp]);
}
(Size::S32, Location::Memory(src, disp), Size::S64, Location::GPR(dst)) => {
dynasm!(self ; movsx Rq(dst as u8), DWORD [Rq(src as u8) + disp]);
}
_ => unreachable!(),
}
}
fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR) {
dynasm!(self ; btc Rd(dst as u8), BYTE src as i8);
}
fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR) {
dynasm!(self ; btc Rq(dst as u8), BYTE src as i8);
}
fn emit_cmovae_gpr_32(&mut self, src: GPR, dst: GPR) {
dynasm!(self ; cmovae Rd(dst as u8), Rd(src as u8));
}
fn emit_cmovae_gpr_64(&mut self, src: GPR, dst: GPR) {
dynasm!(self ; cmovae Rq(dst as u8), Rq(src as u8));
}
avx_fn!(vaddss, emit_vaddss);
avx_fn!(vaddsd, emit_vaddsd);
avx_fn!(vsubss, emit_vsubss);
avx_fn!(vsubsd, emit_vsubsd);
avx_fn!(vmulss, emit_vmulss);
avx_fn!(vmulsd, emit_vmulsd);
avx_fn!(vdivss, emit_vdivss);
avx_fn!(vdivsd, emit_vdivsd);
avx_fn!(vmaxss, emit_vmaxss);
avx_fn!(vmaxsd, emit_vmaxsd);
avx_fn!(vminss, emit_vminss);
avx_fn!(vminsd, emit_vminsd);
avx_fn!(vcmpeqss, emit_vcmpeqss);
avx_fn!(vcmpeqsd, emit_vcmpeqsd);
avx_fn!(vcmpneqss, emit_vcmpneqss);
avx_fn!(vcmpneqsd, emit_vcmpneqsd);
avx_fn!(vcmpltss, emit_vcmpltss);
avx_fn!(vcmpltsd, emit_vcmpltsd);
avx_fn!(vcmpless, emit_vcmpless);
avx_fn!(vcmplesd, emit_vcmplesd);
avx_fn!(vcmpgtss, emit_vcmpgtss);
avx_fn!(vcmpgtsd, emit_vcmpgtsd);
avx_fn!(vcmpgess, emit_vcmpgess);
avx_fn!(vcmpgesd, emit_vcmpgesd);
avx_fn!(vsqrtss, emit_vsqrtss);
avx_fn!(vsqrtsd, emit_vsqrtsd);
avx_fn!(vcvtss2sd, emit_vcvtss2sd);
avx_fn!(vcvtsd2ss, emit_vcvtsd2ss);
avx_round_fn!(vroundss, emit_vroundss_nearest, 0);
avx_round_fn!(vroundss, emit_vroundss_floor, 1);
avx_round_fn!(vroundss, emit_vroundss_ceil, 2);
avx_round_fn!(vroundss, emit_vroundss_trunc, 3);
avx_round_fn!(vroundsd, emit_vroundsd_nearest, 0);
avx_round_fn!(vroundsd, emit_vroundsd_floor, 1);
avx_round_fn!(vroundsd, emit_vroundsd_ceil, 2);
avx_round_fn!(vroundsd, emit_vroundsd_trunc, 3);
avx_i2f_32_fn!(vcvtsi2ss, emit_vcvtsi2ss_32);
avx_i2f_32_fn!(vcvtsi2sd, emit_vcvtsi2sd_32);
avx_i2f_64_fn!(vcvtsi2ss, emit_vcvtsi2ss_64);
avx_i2f_64_fn!(vcvtsi2sd, emit_vcvtsi2sd_64);
fn emit_ucomiss(&mut self, src: XMMOrMemory, dst: XMM) {
match src {
XMMOrMemory::XMM(x) => dynasm!(self ; ucomiss Rx(dst as u8), Rx(x as u8)),
XMMOrMemory::Memory(base, disp) => {
dynasm!(self ; ucomiss Rx(dst as u8), [Rq(base as u8) + disp])
}
}
}
fn emit_ucomisd(&mut self, src: XMMOrMemory, dst: XMM) {
match src {
XMMOrMemory::XMM(x) => dynasm!(self ; ucomisd Rx(dst as u8), Rx(x as u8)),
XMMOrMemory::Memory(base, disp) => {
dynasm!(self ; ucomisd Rx(dst as u8), [Rq(base as u8) + disp])
}
}
}
/// Emits `cvttss2si` into a 32-bit destination: truncating conversion of a
/// scalar single-precision float to a signed integer.
fn emit_cvttss2si_32(&mut self, src: XMMOrMemory, dst: GPR) {
    match src {
        XMMOrMemory::XMM(x) => dynasm!(self ; cvttss2si Rd(dst as u8), Rx(x as u8)),
        XMMOrMemory::Memory(base, disp) => {
            dynasm!(self ; cvttss2si Rd(dst as u8), [Rq(base as u8) + disp])
        }
    }
}
/// Emits `cvttss2si` into a 64-bit destination: truncating conversion of a
/// scalar single-precision float to a signed integer.
fn emit_cvttss2si_64(&mut self, src: XMMOrMemory, dst: GPR) {
    match src {
        XMMOrMemory::XMM(x) => dynasm!(self ; cvttss2si Rq(dst as u8), Rx(x as u8)),
        XMMOrMemory::Memory(base, disp) => {
            dynasm!(self ; cvttss2si Rq(dst as u8), [Rq(base as u8) + disp])
        }
    }
}
/// Emits `cvttsd2si` into a 32-bit destination: truncating conversion of a
/// scalar double-precision float to a signed integer.
fn emit_cvttsd2si_32(&mut self, src: XMMOrMemory, dst: GPR) {
    match src {
        XMMOrMemory::XMM(x) => dynasm!(self ; cvttsd2si Rd(dst as u8), Rx(x as u8)),
        XMMOrMemory::Memory(base, disp) => {
            dynasm!(self ; cvttsd2si Rd(dst as u8), [Rq(base as u8) + disp])
        }
    }
}
/// Emits `cvttsd2si` into a 64-bit destination: truncating conversion of a
/// scalar double-precision float to a signed integer.
fn emit_cvttsd2si_64(&mut self, src: XMMOrMemory, dst: GPR) {
    match src {
        XMMOrMemory::XMM(x) => dynasm!(self ; cvttsd2si Rq(dst as u8), Rx(x as u8)),
        XMMOrMemory::Memory(base, disp) => {
            dynasm!(self ; cvttsd2si Rq(dst as u8), [Rq(base as u8) + disp])
        }
    }
}
/// Emits `test reg, reg` (64-bit); sets ZF when the register is zero.
fn emit_test_gpr_64(&mut self, reg: GPR) {
    dynasm!(self ; test Rq(reg as u8), Rq(reg as u8));
}

/// Emits `ud2`, a guaranteed-invalid opcode used to raise a trap.
fn emit_ud2(&mut self) {
    dynasm!(self ; ud2);
}

/// Emits `ret`.
fn emit_ret(&mut self) {
    dynasm!(self ; ret);
}

/// Emits a direct `call` to a dynasm dynamic label.
fn emit_call_label(&mut self, label: Self::Label) {
    dynasm!(self ; call =>label);
}

/// Emits an indirect `call` through a register or a 64-bit memory operand.
fn emit_call_location(&mut self, loc: Location) {
    match loc {
        Location::GPR(x) => dynasm!(self ; call Rq(x as u8)),
        Location::Memory(base, disp) => dynasm!(self ; call QWORD [Rq(base as u8) + disp]),
        _ => unreachable!(),
    }
}
}

View File

@ -0,0 +1,95 @@
#![feature(proc_macro_hygiene)]

// This backend emits x86-64 machine code directly and relies on POSIX
// signal handling for traps, so refuse to build on any other platform.
#[cfg(not(any(
    all(target_os = "macos", target_arch = "x86_64"),
    all(target_os = "linux", target_arch = "x86_64"),
)))]
compile_error!("This crate doesn't yet support compiling on operating systems other than linux and macos and architectures other than x86_64");

extern crate dynasmrt;
#[macro_use]
extern crate dynasm;
#[macro_use]
extern crate lazy_static;
extern crate byteorder;
#[macro_use]
extern crate smallvec;
mod codegen;
mod codegen_x64;
mod emitter_x64;
mod machine;
mod parse;
mod protect_unix;
use crate::codegen::{CodegenError, ModuleCodeGenerator};
use crate::parse::LoadError;
use wasmer_runtime_core::{
backend::{sys::Memory, Backend, CacheGen, Compiler, CompilerConfig, Token},
cache::{Artifact, Error as CacheError},
error::{CompileError, CompileResult},
module::{ModuleInfo, ModuleInner},
};
/// Stand-in `CacheGen` implementation: the singlepass backend cannot
/// serialize compiled modules yet, so cache generation always errors.
struct Placeholder;

impl CacheGen for Placeholder {
    fn generate_cache(
        &self,
        _module: &ModuleInner,
    ) -> Result<(Box<ModuleInfo>, Box<[u8]>, Memory), CacheError> {
        Err(CacheError::Unknown(
            "the singlepass backend doesn't support caching yet".to_string(),
        ))
    }
}
/// Entry point for the single-pass (dynasm-based) x86-64 compiler backend.
pub struct SinglePassCompiler {}

impl SinglePassCompiler {
    /// Creates a new single-pass compiler instance.
    pub fn new() -> Self {
        Self {}
    }
}

// `new()` takes no arguments, so `Default` should exist as well
// (clippy: `new_without_default`); it lets the compiler be constructed
// in generic contexts.
impl Default for SinglePassCompiler {
    fn default() -> Self {
        Self::new()
    }
}
impl Compiler for SinglePassCompiler {
    /// Compiles a wasm binary into a runnable module with the x86-64
    /// single-pass code generator: parse/validate, generate code per
    /// function, then finalize into a caller + resolver pair.
    fn compile(
        &self,
        wasm: &[u8],
        compiler_config: CompilerConfig,
        _: Token,
    ) -> CompileResult<ModuleInner> {
        let mut mcg = codegen_x64::X64ModuleCodeGenerator::new();
        let info = parse::read_module(wasm, Backend::Singlepass, &mut mcg, &compiler_config)?;
        let (ec, resolver) = mcg.finalize(&info)?;
        Ok(ModuleInner {
            cache_gen: Box::new(Placeholder),
            func_resolver: Box::new(resolver),
            protected_caller: Box::new(ec),
            // Field-init shorthand (was the redundant `info: info`).
            info,
        })
    }

    /// Caching is not implemented for this backend yet.
    unsafe fn from_cache(&self, _artifact: Artifact, _: Token) -> Result<ModuleInner, CacheError> {
        Err(CacheError::Unknown(
            "the singlepass backend doesn't support caching yet".to_string(),
        ))
    }
}
// Allow `?` on codegen results inside `compile` by mapping codegen
// failures to the runtime's internal-error variant.
impl From<CodegenError> for CompileError {
    fn from(other: CodegenError) -> CompileError {
        CompileError::InternalError {
            msg: other.message.into(),
        }
    }
}
// Map parse/validation failures into the runtime's internal-error variant,
// using the Debug representation as the message.
impl From<LoadError> for CompileError {
    fn from(other: LoadError) -> CompileError {
        CompileError::InternalError {
            msg: format!("{:?}", other),
        }
    }
}

View File

@ -0,0 +1,419 @@
use crate::emitter_x64::*;
use smallvec::SmallVec;
use std::collections::HashSet;
use wasmparser::Type as WpType;
/// A byte offset into the machine stack frame, measured downward from RBP.
struct MachineStackOffset(usize);

/// Register-allocation and stack-frame state for the function currently
/// being compiled.
pub struct Machine {
    // General-purpose registers currently holding live values.
    used_gprs: HashSet<GPR>,
    // XMM registers currently holding live values.
    used_xmms: HashSet<XMM>,
    // Current size, in bytes, of the dynamic stack area below RBP.
    stack_offset: MachineStackOffset,
    // Offset of the callee-saved register save area; set by `init_locals`.
    save_area_offset: Option<MachineStackOffset>,
}
impl Machine {
/// Creates a fresh machine state: no registers in use, an empty stack
/// frame, and no save area recorded yet.
pub fn new() -> Self {
    Machine {
        used_gprs: Default::default(),
        used_xmms: Default::default(),
        stack_offset: MachineStackOffset(0),
        save_area_offset: None,
    }
}
/// Current size, in bytes, of the dynamic stack area.
pub fn get_stack_offset(&self) -> usize {
    self.stack_offset.0
}

/// Snapshot of the general-purpose registers currently in use.
pub fn get_used_gprs(&self) -> Vec<GPR> {
    self.used_gprs.iter().cloned().collect()
}

/// Snapshot of the XMM registers currently in use.
pub fn get_used_xmms(&self) -> Vec<XMM> {
    self.used_xmms.iter().cloned().collect()
}

/// The register permanently reserved for the VM context pointer.
pub fn get_vmctx_reg() -> GPR {
    GPR::R15
}
/// Picks an unused general purpose register for local/stack/argument use.
///
/// This method does not mark the register as used.
pub fn pick_gpr(&self) -> Option<GPR> {
    use GPR::*;
    // Caller-saved registers not reserved for temporaries or vmctx.
    static REGS: &'static [GPR] = &[RSI, RDI, R8, R9, R10, R11];
    REGS.iter().find(|r| !self.used_gprs.contains(*r)).cloned()
}
/// Picks an unused general purpose register for internal temporary use.
///
/// This method does not mark the register as used.
pub fn pick_temp_gpr(&self) -> Option<GPR> {
    use GPR::*;
    // Scratch registers kept separate from the value-holding set.
    static REGS: &'static [GPR] = &[RAX, RCX, RDX];
    REGS.iter().find(|r| !self.used_gprs.contains(*r)).cloned()
}
/// Acquires a temporary GPR, marking it as used.
pub fn acquire_temp_gpr(&mut self) -> Option<GPR> {
    self.pick_temp_gpr().map(|reg| {
        self.used_gprs.insert(reg);
        reg
    })
}
/// Releases a temporary GPR.
///
/// Panics if `gpr` was not marked as in use — a double release or a
/// release of a never-acquired register is a codegen bug.
pub fn release_temp_gpr(&mut self, gpr: GPR) {
    // `remove` returns whether the value was present; asserting the bool
    // directly replaces the `assert_eq!(…, true)` anti-idiom.
    assert!(self.used_gprs.remove(&gpr));
}
/// Picks an unused XMM register.
///
/// This method does not mark the register as used.
pub fn pick_xmm(&self) -> Option<XMM> {
    use XMM::*;
    // XMM0-XMM2 are reserved for temporaries; the rest hold values.
    static REGS: &'static [XMM] = &[XMM3, XMM4, XMM5, XMM6, XMM7];
    REGS.iter().find(|r| !self.used_xmms.contains(*r)).cloned()
}
/// Picks an unused XMM register for internal temporary use.
///
/// This method does not mark the register as used.
pub fn pick_temp_xmm(&self) -> Option<XMM> {
    use XMM::*;
    static REGS: &'static [XMM] = &[XMM0, XMM1, XMM2];
    REGS.iter().find(|r| !self.used_xmms.contains(*r)).cloned()
}
/// Acquires a temporary XMM register, marking it as used.
pub fn acquire_temp_xmm(&mut self) -> Option<XMM> {
    self.pick_temp_xmm().map(|reg| {
        self.used_xmms.insert(reg);
        reg
    })
}
/// Releases a temporary XMM register.
///
/// Panics if `xmm` was not marked as in use — a double release or a
/// release of a never-acquired register is a codegen bug.
pub fn release_temp_xmm(&mut self, xmm: XMM) {
    // `remove` returns whether the value was present; asserting the bool
    // directly replaces the `assert_eq!(…, true)` anti-idiom.
    assert!(self.used_xmms.remove(&xmm));
}
/// Acquires locations from the machine state.
///
/// If the returned locations are used for stack value, `release_location` needs to be called on them;
/// Otherwise, if the returned locations are used for locals, `release_location` does not need to be called on them.
pub fn acquire_locations<E: Emitter>(
    &mut self,
    assembler: &mut E,
    tys: &[WpType],
    zeroed: bool,
) -> SmallVec<[Location; 1]> {
    let mut ret = smallvec![];
    let mut delta_stack_offset: usize = 0;

    for ty in tys {
        // Prefer a free register of the matching class.
        let loc = match *ty {
            WpType::F32 | WpType::F64 => self.pick_xmm().map(Location::XMM),
            WpType::I32 | WpType::I64 => self.pick_gpr().map(Location::GPR),
            _ => unreachable!(),
        };

        // No register available: spill to a new 8-byte stack slot below RBP.
        let loc = if let Some(x) = loc {
            x
        } else {
            self.stack_offset.0 += 8;
            delta_stack_offset += 8;
            Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32))
        };
        // Mark any register we handed out as in use.
        if let Location::GPR(x) = loc {
            self.used_gprs.insert(x);
        } else if let Location::XMM(x) = loc {
            self.used_xmms.insert(x);
        }
        ret.push(loc);
    }

    // Reserve the newly used stack space in one RSP adjustment.
    if delta_stack_offset != 0 {
        assembler.emit_sub(
            Size::S64,
            Location::Imm32(delta_stack_offset as u32),
            Location::GPR(GPR::RSP),
        );
    }
    if zeroed {
        for i in 0..tys.len() {
            assembler.emit_mov(Size::S64, Location::Imm32(0), ret[i]);
        }
    }
    ret
}
/// Releases locations used for stack value.
///
/// Locations must be released in reverse acquisition order: memory slots
/// are popped off the top of the dynamic stack area one at a time.
pub fn release_locations<E: Emitter>(&mut self, assembler: &mut E, locs: &[Location]) {
    let mut delta_stack_offset: usize = 0;

    for loc in locs.iter().rev() {
        match *loc {
            Location::GPR(ref x) => {
                assert_eq!(self.used_gprs.remove(x), true);
            }
            Location::XMM(ref x) => {
                assert_eq!(self.used_xmms.remove(x), true);
            }
            Location::Memory(GPR::RBP, x) => {
                if x >= 0 {
                    unreachable!();
                }
                let offset = (-x) as usize;
                // Enforce LIFO release: only the topmost slot may go.
                if offset != self.stack_offset.0 {
                    unreachable!();
                }
                self.stack_offset.0 -= 8;
                delta_stack_offset += 8;
            }
            _ => {}
        }
    }

    // Give the freed stack space back in one RSP adjustment.
    if delta_stack_offset != 0 {
        assembler.emit_add(
            Size::S64,
            Location::Imm32(delta_stack_offset as u32),
            Location::GPR(GPR::RSP),
        );
    }
}
/// Releases only the register locations in `locs`, leaving any stack
/// slots (and RSP) untouched; pair with `release_locations_only_stack`.
pub fn release_locations_only_regs(&mut self, locs: &[Location]) {
    for loc in locs.iter().rev() {
        match *loc {
            Location::GPR(ref x) => {
                assert_eq!(self.used_gprs.remove(x), true);
            }
            Location::XMM(ref x) => {
                assert_eq!(self.used_xmms.remove(x), true);
            }
            _ => {}
        }
    }
}
/// Releases only the stack-slot locations in `locs` (LIFO order), emitting
/// a single RSP adjustment; registers are ignored here.
pub fn release_locations_only_stack<E: Emitter>(
    &mut self,
    assembler: &mut E,
    locs: &[Location],
) {
    let mut delta_stack_offset: usize = 0;

    for loc in locs.iter().rev() {
        match *loc {
            Location::Memory(GPR::RBP, x) => {
                if x >= 0 {
                    unreachable!();
                }
                let offset = (-x) as usize;
                // Only the topmost slot of the dynamic area may be freed.
                if offset != self.stack_offset.0 {
                    unreachable!();
                }
                self.stack_offset.0 -= 8;
                delta_stack_offset += 8;
            }
            _ => {}
        }
    }

    if delta_stack_offset != 0 {
        assembler.emit_add(
            Size::S64,
            Location::Imm32(delta_stack_offset as u32),
            Location::GPR(GPR::RSP),
        );
    }
}
/// Emits the RSP adjustment that *would* free the given stack slots, but
/// does not modify the tracked machine state (`&self`). Used on branch
/// paths that leave the logical frame layout intact.
pub fn release_locations_keep_state<E: Emitter>(&self, assembler: &mut E, locs: &[Location]) {
    let mut delta_stack_offset: usize = 0;

    for loc in locs.iter().rev() {
        match *loc {
            Location::Memory(GPR::RBP, x) => {
                if x >= 0 {
                    unreachable!();
                }
                let offset = (-x) as usize;
                if offset != self.stack_offset.0 {
                    unreachable!();
                }
                delta_stack_offset += 8;
            }
            _ => {}
        }
    }

    if delta_stack_offset != 0 {
        assembler.emit_add(
            Size::S64,
            Location::Imm32(delta_stack_offset as u32),
            Location::GPR(GPR::RSP),
        );
    }
}
/// Sets up the function prologue state for `n` wasm locals, of which the
/// first `n_params` arrive as parameters: allocates a location per local,
/// saves callee-saved registers and R15, copies register parameters into
/// place, loads vmctx into R15, and zero-initializes non-parameter locals.
///
/// Returns the chosen location for each local, indexed by local number.
pub fn init_locals<E: Emitter>(
    &mut self,
    a: &mut E,
    n: usize,
    n_params: usize,
) -> Vec<Location> {
    // Use callee-saved registers for locals.
    fn get_local_location(idx: usize) -> Location {
        match idx {
            0 => Location::GPR(GPR::R12),
            1 => Location::GPR(GPR::R13),
            2 => Location::GPR(GPR::R14),
            3 => Location::GPR(GPR::RBX),
            // Locals beyond the four callee-saved registers live in
            // stack slots below RBP.
            _ => Location::Memory(GPR::RBP, -(((idx - 3) * 8) as i32)),
        }
    }

    let mut locations: Vec<Location> = vec![];
    let mut allocated: usize = 0;

    // Determine locations for parameters.
    // Parameter index is shifted by one because slot 0 of the native
    // calling convention carries the vmctx pointer.
    for i in 0..n_params {
        let loc = Self::get_param_location(i + 1);
        locations.push(match loc {
            Location::GPR(_) => {
                let old_idx = allocated;
                allocated += 1;
                get_local_location(old_idx)
            }
            // Stack-passed parameters stay where the caller put them.
            Location::Memory(_, _) => loc,
            _ => unreachable!(),
        });
    }

    // Determine locations for normal locals.
    for _ in n_params..n {
        locations.push(get_local_location(allocated));
        allocated += 1;
    }

    // How many machine stack slots did all the locals use?
    let num_mem_slots = locations
        .iter()
        .filter(|&&loc| match loc {
            Location::Memory(_, _) => true,
            _ => false,
        })
        .count();

    // Move RSP down to reserve space for machine stack slots.
    if num_mem_slots > 0 {
        a.emit_sub(
            Size::S64,
            Location::Imm32((num_mem_slots * 8) as u32),
            Location::GPR(GPR::RSP),
        );
        self.stack_offset.0 += num_mem_slots * 8;
    }

    // Save callee-saved registers.
    for loc in locations.iter() {
        if let Location::GPR(_) = *loc {
            a.emit_push(Size::S64, *loc);
            self.stack_offset.0 += 8;
        }
    }

    // Save R15 for vmctx use.
    a.emit_push(Size::S64, Location::GPR(GPR::R15));
    self.stack_offset.0 += 8;

    // Save the offset of static area.
    self.save_area_offset = Some(MachineStackOffset(self.stack_offset.0));

    // Load in-register parameters into the allocated locations.
    // Register parameters precede stack parameters, so stop at the
    // first stack-passed one.
    for i in 0..n_params {
        let loc = Self::get_param_location(i + 1);
        match loc {
            Location::GPR(_) => {
                a.emit_mov(Size::S64, loc, locations[i]);
            }
            _ => break,
        }
    }

    // Load vmctx.
    a.emit_mov(
        Size::S64,
        Self::get_param_location(0),
        Location::GPR(GPR::R15),
    );

    // Initialize all normal locals to zero.
    for i in n_params..n {
        a.emit_mov(Size::S64, Location::Imm32(0), locations[i]);
    }

    locations
}
/// Emits the epilogue that undoes `init_locals`: rewinds RSP to the save
/// area, then restores R15 and the callee-saved registers in reverse
/// order of how they were pushed.
pub fn finalize_locals<E: Emitter>(&mut self, a: &mut E, locations: &[Location]) {
    // Unwind stack to the "save area".
    a.emit_lea(
        Size::S64,
        Location::Memory(
            GPR::RBP,
            -(self.save_area_offset.as_ref().unwrap().0 as i32),
        ),
        Location::GPR(GPR::RSP),
    );

    // Restore R15 used by vmctx.
    a.emit_pop(Size::S64, Location::GPR(GPR::R15));

    // Restore callee-saved registers.
    for loc in locations.iter().rev() {
        if let Location::GPR(_) = *loc {
            a.emit_pop(Size::S64, *loc);
        }
    }
}
/// Maps a native-call argument index to its location. The register order
/// RDI, RSI, RDX, RCX, R8, R9 and the stack layout (+16 skips the saved
/// RBP and return address) match the System V AMD64 calling convention.
pub fn get_param_location(idx: usize) -> Location {
    match idx {
        0 => Location::GPR(GPR::RDI),
        1 => Location::GPR(GPR::RSI),
        2 => Location::GPR(GPR::RDX),
        3 => Location::GPR(GPR::RCX),
        4 => Location::GPR(GPR::R8),
        5 => Location::GPR(GPR::R9),
        _ => Location::Memory(GPR::RBP, (16 + (idx - 6) * 8) as i32),
    }
}
}

View File

@ -0,0 +1,447 @@
use crate::codegen::{CodegenError, FunctionCodeGenerator, ModuleCodeGenerator};
use hashbrown::HashMap;
use wasmer_runtime_core::{
backend::{Backend, CompilerConfig, FuncResolver, ProtectedCaller},
module::{
DataInitializer, ExportIndex, ImportName, ModuleInfo, StringTable, StringTableBuilder,
TableInitializer,
},
structures::{Map, TypedIndex},
types::{
ElementType, FuncIndex, FuncSig, GlobalDescriptor, GlobalIndex, GlobalInit,
ImportedGlobalIndex, Initializer, MemoryDescriptor, MemoryIndex, SigIndex, TableDescriptor,
TableIndex, Type, Value,
},
units::Pages,
};
use wasmparser::{
BinaryReaderError, Data, DataKind, Element, ElementKind, Export, ExternalKind, FuncType,
Import, ImportSectionEntryType, InitExpr, ModuleReader, Operator, SectionCode, Type as WpType,
WasmDecoder,
};
/// Errors that can occur while reading a wasm module: either the binary
/// itself is malformed (`Parse`) or the code generator rejected it
/// (`Codegen`).
#[derive(Debug)]
pub enum LoadError {
    Parse(BinaryReaderError),
    Codegen(CodegenError),
}

impl From<BinaryReaderError> for LoadError {
    fn from(other: BinaryReaderError) -> LoadError {
        LoadError::Parse(other)
    }
}

impl From<CodegenError> for LoadError {
    fn from(other: CodegenError) -> LoadError {
        LoadError::Codegen(other)
    }
}
/// Runs the wasmparser validator over the whole binary before code
/// generation, with all post-MVP proposals (threads, reference types,
/// SIMD, bulk memory) disabled.
fn validate(bytes: &[u8]) -> Result<(), LoadError> {
    let mut parser = wasmparser::ValidatingParser::new(
        bytes,
        Some(wasmparser::ValidatingParserConfig {
            operator_config: wasmparser::OperatorValidatorConfig {
                enable_threads: false,
                enable_reference_types: false,
                enable_simd: false,
                enable_bulk_memory: false,
            },
            mutable_global_imports: false,
        }),
    );

    // Pump the validator until it either reaches the end of the module
    // or reports an error.
    loop {
        let state = parser.read();
        match *state {
            wasmparser::ParserState::EndWasm => break Ok(()),
            wasmparser::ParserState::Error(err) => Err(LoadError::Parse(err))?,
            _ => {}
        }
    }
}
/// Drives `wasmparser` section-by-section over a validated module,
/// filling in a `ModuleInfo` and feeding signatures, imports, and
/// function bodies to the code generator `mcg` as they are encountered.
pub fn read_module<
    MCG: ModuleCodeGenerator<FCG, PC, FR>,
    FCG: FunctionCodeGenerator,
    PC: ProtectedCaller,
    FR: FuncResolver,
>(
    wasm: &[u8],
    backend: Backend,
    mcg: &mut MCG,
    compiler_config: &CompilerConfig,
) -> Result<ModuleInfo, LoadError> {
    // Validate up front so the section handlers below can assume
    // well-formed input.
    validate(wasm)?;
    let mut info = ModuleInfo {
        memories: Map::new(),
        globals: Map::new(),
        tables: Map::new(),
        imported_functions: Map::new(),
        imported_memories: Map::new(),
        imported_tables: Map::new(),
        imported_globals: Map::new(),
        exports: Default::default(),
        data_initializers: Vec::new(),
        elem_initializers: Vec::new(),
        start_func: None,
        func_assoc: Map::new(),
        signatures: Map::new(),
        backend: backend,
        namespace_table: StringTable::new(),
        name_table: StringTable::new(),
        em_symbol_map: compiler_config.symbol_map.clone(),
        custom_sections: HashMap::new(),
    };

    let mut reader = ModuleReader::new(wasm)?;

    loop {
        if reader.eof() {
            return Ok(info);
        }

        let section = reader.read()?;

        match section.code {
            // Function signatures; forwarded to the code generator.
            SectionCode::Type => {
                let type_reader = section.get_type_section_reader()?;

                for ty in type_reader {
                    let ty = ty?;
                    info.signatures.push(func_type_to_func_sig(ty)?);
                }

                mcg.feed_signatures(info.signatures.clone())?;
            }
            // Imported functions/tables/memories/globals. Names are
            // interned into the namespace/name string tables.
            SectionCode::Import => {
                let import_reader = section.get_import_section_reader()?;
                let mut namespace_builder = StringTableBuilder::new();
                let mut name_builder = StringTableBuilder::new();

                for import in import_reader {
                    let Import { module, field, ty } = import?;

                    let namespace_index = namespace_builder.register(module);
                    let name_index = name_builder.register(field);
                    let import_name = ImportName {
                        namespace_index,
                        name_index,
                    };

                    match ty {
                        ImportSectionEntryType::Function(sigindex) => {
                            let sigindex = SigIndex::new(sigindex as usize);
                            info.imported_functions.push(import_name);
                            info.func_assoc.push(sigindex);
                            mcg.feed_import_function()?;
                        }
                        ImportSectionEntryType::Table(table_ty) => {
                            assert_eq!(table_ty.element_type, WpType::AnyFunc);
                            let table_desc = TableDescriptor {
                                element: ElementType::Anyfunc,
                                minimum: table_ty.limits.initial,
                                maximum: table_ty.limits.maximum,
                            };

                            info.imported_tables.push((import_name, table_desc));
                        }
                        ImportSectionEntryType::Memory(memory_ty) => {
                            let mem_desc = MemoryDescriptor {
                                minimum: Pages(memory_ty.limits.initial),
                                maximum: memory_ty.limits.maximum.map(|max| Pages(max)),
                                shared: memory_ty.shared,
                            };
                            info.imported_memories.push((import_name, mem_desc));
                        }
                        ImportSectionEntryType::Global(global_ty) => {
                            let global_desc = GlobalDescriptor {
                                mutable: global_ty.mutable,
                                ty: wp_type_to_type(global_ty.content_type)?,
                            };
                            info.imported_globals.push((import_name, global_desc));
                        }
                    }
                }

                info.namespace_table = namespace_builder.finish();
                info.name_table = name_builder.finish();
            }
            // Function declarations: signature index per local function,
            // appended after the imported functions in `func_assoc`.
            SectionCode::Function => {
                let func_decl_reader = section.get_function_section_reader()?;

                for sigindex in func_decl_reader {
                    let sigindex = sigindex?;

                    let sigindex = SigIndex::new(sigindex as usize);
                    info.func_assoc.push(sigindex);
                }

                mcg.feed_function_signatures(info.func_assoc.clone())?;
            }
            SectionCode::Table => {
                let table_decl_reader = section.get_table_section_reader()?;

                for table_ty in table_decl_reader {
                    let table_ty = table_ty?;

                    let table_desc = TableDescriptor {
                        element: ElementType::Anyfunc,
                        minimum: table_ty.limits.initial,
                        maximum: table_ty.limits.maximum,
                    };

                    info.tables.push(table_desc);
                }
            }
            SectionCode::Memory => {
                let mem_decl_reader = section.get_memory_section_reader()?;

                for memory_ty in mem_decl_reader {
                    let memory_ty = memory_ty?;

                    let mem_desc = MemoryDescriptor {
                        minimum: Pages(memory_ty.limits.initial),
                        maximum: memory_ty.limits.maximum.map(|max| Pages(max)),
                        shared: memory_ty.shared,
                    };

                    info.memories.push(mem_desc);
                }
            }
            SectionCode::Global => {
                let global_decl_reader = section.get_global_section_reader()?;

                for global in global_decl_reader {
                    let global = global?;

                    let desc = GlobalDescriptor {
                        mutable: global.ty.mutable,
                        ty: wp_type_to_type(global.ty.content_type)?,
                    };

                    let global_init = GlobalInit {
                        desc,
                        init: eval_init_expr(&global.init_expr)?,
                    };

                    info.globals.push(global_init);
                }
            }
            SectionCode::Export => {
                let export_reader = section.get_export_section_reader()?;

                for export in export_reader {
                    let Export { field, kind, index } = export?;

                    let export_index = match kind {
                        ExternalKind::Function => ExportIndex::Func(FuncIndex::new(index as usize)),
                        ExternalKind::Table => ExportIndex::Table(TableIndex::new(index as usize)),
                        ExternalKind::Memory => {
                            ExportIndex::Memory(MemoryIndex::new(index as usize))
                        }
                        ExternalKind::Global => {
                            ExportIndex::Global(GlobalIndex::new(index as usize))
                        }
                    };

                    info.exports.insert(field.to_string(), export_index);
                }
            }
            SectionCode::Start => {
                let start_index = section.get_start_section_content()?;

                info.start_func = Some(FuncIndex::new(start_index as usize));
            }
            // Table element initializers. Only active segments are
            // supported; passive segments are a bulk-memory feature.
            SectionCode::Element => {
                let element_reader = section.get_element_section_reader()?;

                for element in element_reader {
                    let Element { kind, items } = element?;

                    match kind {
                        ElementKind::Active {
                            table_index,
                            init_expr,
                        } => {
                            let table_index = TableIndex::new(table_index as usize);
                            let base = eval_init_expr(&init_expr)?;
                            let items_reader = items.get_items_reader()?;

                            let elements: Vec<_> = items_reader
                                .into_iter()
                                .map(|res| res.map(|index| FuncIndex::new(index as usize)))
                                .collect::<Result<_, _>>()?;

                            let table_init = TableInitializer {
                                table_index,
                                base,
                                elements,
                            };

                            info.elem_initializers.push(table_init);
                        }
                        ElementKind::Passive(_ty) => {
                            return Err(BinaryReaderError {
                                message: "passive tables are not yet supported",
                                offset: -1isize as usize,
                            }
                            .into());
                        }
                    }
                }
            }
            // Function bodies: stream returns, params, locals, and then
            // every operator of each body into the per-function codegen.
            SectionCode::Code => {
                let mut code_reader = section.get_code_section_reader()?;
                if code_reader.get_count() as usize > info.func_assoc.len() {
                    return Err(BinaryReaderError {
                        message: "code_reader.get_count() > info.func_assoc.len()",
                        offset: ::std::usize::MAX,
                    }
                    .into());
                }
                mcg.check_precondition(&info)?;
                for i in 0..code_reader.get_count() {
                    let item = code_reader.read()?;
                    let fcg = mcg.next_function()?;
                    // Function indices are offset by the imported functions.
                    let sig = info
                        .signatures
                        .get(
                            *info
                                .func_assoc
                                .get(FuncIndex::new(i as usize + info.imported_functions.len()))
                                .unwrap(),
                        )
                        .unwrap();
                    for ret in sig.returns() {
                        fcg.feed_return(type_to_wp_type(*ret))?;
                    }
                    for param in sig.params() {
                        fcg.feed_param(type_to_wp_type(*param))?;
                    }
                    for local in item.get_locals_reader()? {
                        let (count, ty) = local?;
                        fcg.feed_local(ty, count as usize)?;
                    }
                    fcg.begin_body()?;
                    for op in item.get_operators_reader()? {
                        let op = op?;
                        fcg.feed_opcode(op, &info)?;
                    }
                    fcg.finalize()?;
                }
            }
            // Memory data initializers. Only active segments are
            // supported; passive segments are a bulk-memory feature.
            SectionCode::Data => {
                let data_reader = section.get_data_section_reader()?;
                for data in data_reader {
                    let Data { kind, data } = data?;
                    match kind {
                        DataKind::Active {
                            memory_index,
                            init_expr,
                        } => {
                            let memory_index = MemoryIndex::new(memory_index as usize);
                            let base = eval_init_expr(&init_expr)?;

                            let data_init = DataInitializer {
                                memory_index,
                                base,
                                data: data.to_vec(),
                            };
                            info.data_initializers.push(data_init);
                        }
                        DataKind::Passive => {
                            return Err(BinaryReaderError {
                                message: "passive memories are not yet supported",
                                offset: -1isize as usize,
                            }
                            .into());
                        }
                    }
                }
            }
            SectionCode::DataCount => {}
            SectionCode::Custom { .. } => {}
        }
    }
}
/// Converts a wasmparser value type into the runtime's `Type`.
///
/// V128 is rejected because this backend does not implement the SIMD
/// proposal; any other unknown type is a broken validator invariant.
pub fn wp_type_to_type(ty: WpType) -> Result<Type, BinaryReaderError> {
    Ok(match ty {
        WpType::I32 => Type::I32,
        WpType::I64 => Type::I64,
        WpType::F32 => Type::F32,
        WpType::F64 => Type::F64,
        WpType::V128 => {
            return Err(BinaryReaderError {
                // Fixed: the message previously named the "llvm backend",
                // a leftover from the backend this file was copied from.
                message: "the wasmer singlepass backend does not yet support the simd extension",
                offset: -1isize as usize,
            });
        }
        _ => panic!("broken invariant, invalid type"),
    })
}
/// Converts a runtime `Type` back into the equivalent wasmparser type.
/// Total: every runtime value type has a wasmparser counterpart.
pub fn type_to_wp_type(ty: Type) -> WpType {
    match ty {
        Type::I32 => WpType::I32,
        Type::I64 => WpType::I64,
        Type::F32 => WpType::F32,
        Type::F64 => WpType::F64,
    }
}
/// Converts a wasmparser `FuncType` into a runtime `FuncSig`, translating
/// each parameter and return type; fails if any type is unsupported
/// (e.g. V128).
fn func_type_to_func_sig(func_ty: FuncType) -> Result<FuncSig, BinaryReaderError> {
    // Only the `func` form exists in the MVP type section.
    assert_eq!(func_ty.form, WpType::Func);

    Ok(FuncSig::new(
        func_ty
            .params
            .iter()
            .cloned()
            .map(wp_type_to_type)
            .collect::<Result<Vec<_>, _>>()?,
        func_ty
            .returns
            .iter()
            .cloned()
            .map(wp_type_to_type)
            .collect::<Result<Vec<_>, _>>()?,
    ))
}
/// Evaluates a constant initializer expression (global/element/data
/// offsets). Only single `get_global` or `*.const` opcodes are supported;
/// anything else is rejected with the opcode's offset.
fn eval_init_expr(expr: &InitExpr) -> Result<Initializer, BinaryReaderError> {
    let mut reader = expr.get_operators_reader();
    let (op, offset) = reader.read_with_offset()?;
    Ok(match op {
        // Imported globals only: local globals cannot appear in init
        // expressions per the MVP validation rules.
        Operator::GetGlobal { global_index } => {
            Initializer::GetGlobal(ImportedGlobalIndex::new(global_index as usize))
        }
        Operator::I32Const { value } => Initializer::Const(Value::I32(value)),
        Operator::I64Const { value } => Initializer::Const(Value::I64(value)),
        Operator::F32Const { value } => {
            Initializer::Const(Value::F32(f32::from_bits(value.bits())))
        }
        Operator::F64Const { value } => {
            Initializer::Const(Value::F64(f64::from_bits(value.bits())))
        }
        _ => {
            return Err(BinaryReaderError {
                message: "init expr evaluation failed: unsupported opcode",
                offset,
            });
        }
    })
}

View File

@ -0,0 +1,208 @@
//! Installing signal handlers allows us to handle traps and out-of-bounds memory
//! accesses that occur when running WebAssembly.
//!
//! This code is inspired by: https://github.com/pepyakin/wasmtime/commit/625a2b6c0815b21996e111da51b9664feb174622
//!
//! When a WebAssembly module triggers any traps, we perform recovery here.
//!
//! This module uses TLS (thread-local storage) to track recovery information. Since the four signals we're handling
//! are very special, the async signal unsafety of Rust's TLS implementation generally does not affect the correctness here
//! unless you have memory unsafety elsewhere in your code.
//!
use libc::{c_int, c_void, siginfo_t};
use nix::sys::signal::{
sigaction, SaFlags, SigAction, SigHandler, SigSet, Signal, SIGBUS, SIGFPE, SIGILL, SIGSEGV,
};
use std::any::Any;
use std::cell::{Cell, UnsafeCell};
use std::ptr;
use std::sync::Once;
use wasmer_runtime_core::error::{RuntimeError, RuntimeResult};
/// C-ABI signal handler installed for SIGFPE/SIGILL/SIGSEGV/SIGBUS;
/// records the fault and longjmps back to the protected call.
extern "C" fn signal_trap_handler(
    signum: ::nix::libc::c_int,
    siginfo: *mut siginfo_t,
    ucontext: *mut c_void,
) {
    unsafe {
        do_unwind(signum, siginfo as _, ucontext);
    }
}

// Raw C setjmp/longjmp: Rust has no native non-local-exit primitive that
// can cross a signal-handler boundary.
extern "C" {
    pub fn setjmp(env: *mut c_void) -> c_int;
    fn longjmp(env: *mut c_void, val: c_int) -> !;
}
/// Installs `signal_trap_handler` for the four trap signals, process-wide.
///
/// # Safety
/// Replaces any existing handlers for these signals; must only run once
/// (guarded by `SIGHANDLER_INIT` in `call_protected`).
pub unsafe fn install_sighandler() {
    let sa = SigAction::new(
        SigHandler::SigAction(signal_trap_handler),
        // SA_ONSTACK: run on the alternate signal stack if one is set.
        SaFlags::SA_ONSTACK,
        SigSet::empty(),
    );
    sigaction(SIGFPE, &sa).unwrap();
    sigaction(SIGILL, &sa).unwrap();
    sigaction(SIGSEGV, &sa).unwrap();
    sigaction(SIGBUS, &sa).unwrap();
}
// Size of the setjmp buffer in `c_int`s.
// NOTE(review): 27 appears sized to the platform `jmp_buf`; verify it is
// large enough on both supported targets (linux and macos x86_64).
const SETJMP_BUFFER_LEN: usize = 27;
pub static SIGHANDLER_INIT: Once = Once::new();

thread_local! {
    // Buffer `setjmp` saves into and the signal handler longjmps to.
    pub static SETJMP_BUFFER: UnsafeCell<[c_int; SETJMP_BUFFER_LEN]> = UnsafeCell::new([0; SETJMP_BUFFER_LEN]);
    // (faulting address, instruction pointer) captured by `do_unwind`.
    pub static CAUGHT_ADDRESSES: Cell<(*const c_void, *const c_void)> = Cell::new((ptr::null(), ptr::null()));
    // Base of the currently executing code buffer.
    // NOTE(review): not read in this module — presumably set by codegen; confirm.
    pub static CURRENT_EXECUTABLE_BUFFER: Cell<*const c_void> = Cell::new(ptr::null());
    // Panic payload stashed before unwinding across the signal boundary.
    pub static TRAP_EARLY_DATA: Cell<Option<Box<dyn Any>>> = Cell::new(None);
}
/// Explicitly unwinds to the innermost `call_protected` frame.
///
/// # Safety
/// Must only be called from code running under `call_protected` on the
/// same thread, otherwise the longjmp target buffer is stale/zeroed.
pub unsafe fn trigger_trap() -> ! {
    let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get());

    longjmp(jmp_buf as *mut c_void, 0)
}
/// Runs `f` with trap recovery: a signal raised while `f` executes
/// (FPE/ILL/SEGV/BUS) longjmps back here and is converted into a
/// `RuntimeError` instead of killing the process.
pub fn call_protected<T>(f: impl FnOnce() -> T) -> RuntimeResult<T> {
    unsafe {
        let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get());
        // Save the enclosing buffer so nested protected calls unwind to
        // the right frame.
        let prev_jmp_buf = *jmp_buf;

        SIGHANDLER_INIT.call_once(|| {
            install_sighandler();
        });

        let signum = setjmp(jmp_buf as *mut _);
        if signum != 0 {
            // Arrived here via longjmp from the signal handler.
            *jmp_buf = prev_jmp_buf;

            if let Some(data) = TRAP_EARLY_DATA.with(|cell| cell.replace(None)) {
                Err(RuntimeError::Panic { data })
            } else {
                let (faulting_addr, _inst_ptr) = CAUGHT_ADDRESSES.with(|cell| cell.get());

                let signal = match Signal::from_c_int(signum) {
                    Ok(SIGFPE) => "floating-point exception",
                    Ok(SIGILL) => "illegal instruction",
                    Ok(SIGSEGV) => "segmentation violation",
                    Ok(SIGBUS) => "bus error",
                    Err(_) => "error while getting the Signal",
                    // Fixed typo in the message: "unkown" -> "unknown".
                    _ => "unknown trapped signal",
                };
                // When the trap-handler is fully implemented, this will return more information.
                Err(RuntimeError::Trap {
                    msg: format!("unknown trap at {:p} - {}", faulting_addr, signal).into(),
                }
                .into())
            }
        } else {
            let ret = f(); // TODO: Switch stack?
            *jmp_buf = prev_jmp_buf;
            Ok(ret)
        }
    }
}
/// Unwinds to last protected_call.
///
/// # Safety
/// Only sound when invoked from the signal handler while WebAssembly code
/// (holding no host resources) is on the stack; see the note below.
pub unsafe fn do_unwind(signum: i32, siginfo: *const c_void, ucontext: *const c_void) -> ! {
    // Since do_unwind is only expected to get called from WebAssembly code which doesn't hold any host resources (locks etc.)
    // itself, accessing TLS here is safe. In case any other code calls this, it often indicates a memory safety bug and you should
    // temporarily disable the signal handlers to debug it.

    let jmp_buf = SETJMP_BUFFER.with(|buf| buf.get());
    // An all-zero buffer means no `call_protected` is active on this
    // thread: nowhere to unwind to, so abort instead of jumping to junk.
    if *jmp_buf == [0; SETJMP_BUFFER_LEN] {
        ::std::process::abort();
    }

    CAUGHT_ADDRESSES.with(|cell| cell.set(get_faulting_addr_and_ip(siginfo, ucontext)));

    longjmp(jmp_buf as *mut ::nix::libc::c_void, signum)
}
/// Extracts (faulting address, instruction pointer) from the raw signal
/// info and ucontext on linux/x86_64.
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
unsafe fn get_faulting_addr_and_ip(
    siginfo: *const c_void,
    ucontext: *const c_void,
) -> (*const c_void, *const c_void) {
    use libc::{ucontext_t, RIP};

    // Local mirror of the leading fields of the kernel's siginfo_t;
    // only `si_addr` is read.
    #[allow(dead_code)]
    #[repr(C)]
    struct siginfo_t {
        si_signo: i32,
        si_errno: i32,
        si_code: i32,
        si_addr: u64,
        // ...
    }

    let siginfo = siginfo as *const siginfo_t;
    let si_addr = (*siginfo).si_addr;

    let ucontext = ucontext as *const ucontext_t;
    // RIP of the faulting instruction, from the saved register set.
    let rip = (*ucontext).uc_mcontext.gregs[RIP as usize];

    (si_addr as _, rip as _)
}
/// Extracts (faulting address, instruction pointer) from the raw signal
/// info and ucontext on macos/x86_64, where the mcontext layout must be
/// declared by hand.
/// NOTE(review): these struct layouts mirror darwin's `_STRUCT_UCONTEXT` /
/// `_STRUCT_MCONTEXT64`; verify against the SDK headers on toolchain bumps.
#[cfg(all(target_os = "macos", target_arch = "x86_64"))]
unsafe fn get_faulting_addr_and_ip(
    siginfo: *const c_void,
    ucontext: *const c_void,
) -> (*const c_void, *const c_void) {
    #[allow(dead_code)]
    #[repr(C)]
    struct ucontext_t {
        uc_onstack: u32,
        uc_sigmask: u32,
        uc_stack: libc::stack_t,
        uc_link: *const ucontext_t,
        uc_mcsize: u64,
        uc_mcontext: *const mcontext_t,
    }
    #[repr(C)]
    struct exception_state {
        trapno: u16,
        cpu: u16,
        err: u32,
        faultvaddr: u64,
    }
    #[repr(C)]
    struct regs {
        rax: u64,
        rbx: u64,
        rcx: u64,
        rdx: u64,
        rdi: u64,
        rsi: u64,
        rbp: u64,
        rsp: u64,
        r8: u64,
        r9: u64,
        r10: u64,
        r11: u64,
        r12: u64,
        r13: u64,
        r14: u64,
        r15: u64,
        rip: u64,
        rflags: u64,
        cs: u64,
        fs: u64,
        gs: u64,
    }
    #[allow(dead_code)]
    #[repr(C)]
    struct mcontext_t {
        es: exception_state,
        ss: regs,
        // ...
    }

    let siginfo = siginfo as *const siginfo_t;
    let si_addr = (*siginfo).si_addr;

    let ucontext = ucontext as *const ucontext_t;
    // RIP of the faulting instruction, via the thread state in mcontext.
    let rip = (*(*ucontext).uc_mcontext).ss.rip;

    (si_addr, rip as _)
}