Use System V calling convention and implement a few opcodes.

losfair
2019-02-13 20:04:10 +08:00
parent a69c5b4a14
commit 43df3dd546


@ -1,8 +1,46 @@
use super::codegen::*;
use super::stack::ValueStack;
use super::stack::{ValueInfo, ValueLocation, ValueStack};
use dynasmrt::{x64::Assembler, DynamicLabel, DynasmApi, DynasmLabelApi};
use wasmparser::{Operator, Type as WpType};
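// General-purpose x86-64 registers, listed in hardware encoding order
// (RAX = 0 .. R15 = 15), so `reg as u8` matches the register numbers
// expected by dynasm's Rd/Rq dynamic operands.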
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Register {
RAX,
RCX,
RDX,
RBX,
RSP,
RBP,
RSI,
RDI,
R8,
R9,
R10,
R11,
R12,
R13,
R14,
R15,
}
impl Register {
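// Maps a value-stack scratch register id (0..=7) to a concrete caller-saved
// register. The ordering mirrors the System V AMD64 integer argument
// registers (rdi, rsi, rdx, rcx, r8, r9), followed by r10 and r11.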
pub fn from_scratch_reg(id: u8) -> Register {
use self::Register::*;
match id {
0 => RDI,
1 => RSI,
2 => RDX,
3 => RCX,
4 => R8,
5 => R9,
6 => R10,
7 => R11,
_ => unreachable!(),
}
}
}
#[derive(Default)]
pub struct X64ModuleCodeGenerator {
functions: Vec<X64FunctionCode>,
@ -16,7 +54,6 @@ pub struct X64FunctionCode {
locals: Vec<Local>,
num_params: usize,
current_stack_offset: usize,
callee_managed_stack_offset: usize,
value_stack: ValueStack,
}
@ -49,6 +86,8 @@ impl ModuleCodeGenerator<X64FunctionCode> for X64ModuleCodeGenerator {
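// Standard frame prologue for the new function: save the caller's rbp and
// establish a fresh frame pointer.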
dynasm!(
assembler
; => begin_label
; push rbp
; mov rbp, rsp
);
let code = X64FunctionCode {
id: self.functions.len(),
@ -58,74 +97,99 @@ impl ModuleCodeGenerator<X64FunctionCode> for X64ModuleCodeGenerator {
locals: vec![],
num_params: 0,
current_stack_offset: 0,
callee_managed_stack_offset: 0,
value_stack: ValueStack::new(13),
value_stack: ValueStack::new(8),
};
self.functions.push(code);
Ok(self.functions.last_mut().unwrap())
}
}
impl X64FunctionCode {
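// Emits the machine-level cleanup for popping a value: register-held values
// need no code, while values spilled to the machine stack release their slot
// by adjusting rsp.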
fn gen_rt_pop(assembler: &mut Assembler, info: &ValueInfo) -> Result<(), CodegenError> {
match info.location {
ValueLocation::Register(_) => {}
ValueLocation::Stack => {
let size = get_size_of_type(&info.ty)?;
dynasm!(
assembler
; add rsp, size as i32
);
}
}
Ok(())
}
}
impl FunctionCodeGenerator for X64FunctionCode {
fn feed_param(&mut self, ty: WpType) -> Result<(), CodegenError> {
let assembler = self.assembler.as_mut().unwrap();
let size = get_size_of_type(&ty)?;
self.current_stack_offset += size;
self.locals.push(Local {
ty: ty,
stack_offset: self.current_stack_offset,
});
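// The System V AMD64 ABI passes the first six integer/pointer arguments
// in rdi, rsi, rdx, rcx, r8 and r9.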
let param_reg = match self.num_params {
0 => Register::RDI,
1 => Register::RSI,
2 => Register::RDX,
3 => Register::RCX,
4 => Register::R8,
5 => Register::R9,
_ => {
return Err(CodegenError {
message: "more than 6 function parameters is not yet supported",
})
}
};
self.num_params += 1;
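// Spill the incoming argument register into the frame so the parameter can
// be addressed like any other local.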
if is_dword(size) {
dynasm!(
assembler
; sub rsp, 4
; mov [rsp], Rd(param_reg as u8)
);
} else {
dynasm!(
assembler
; sub rsp, 8
; mov [rsp], Rq(param_reg as u8)
);
}
Ok(())
}
fn feed_local(&mut self, ty: WpType, n: usize) -> Result<(), CodegenError> {
let assembler = self.assembler.as_mut().unwrap();
let size = get_size_of_type(&ty)?;
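// Record a stack slot for each of the `n` declared locals of this type.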
for _ in 0..n {
// FIXME: check range of n
self.current_stack_offset += size;
self.callee_managed_stack_offset += size;
self.locals.push(Local {
ty: ty,
stack_offset: self.current_stack_offset,
});
match size {
4 => dynasm!(
assembler
; sub rsp, 4
; mov DWORD [rsp], 0
),
8 => dynasm!(
assembler
; sub rsp, 8
; mov QWORD [rsp], 0
),
_ => unreachable!(),
}
}
Ok(())
}
fn begin_body(&mut self) -> Result<(), CodegenError> {
let assembler = self.assembler.as_mut().unwrap();
dynasm!(
assembler
; mov rax, rsp
; sub rsp, self.callee_managed_stack_offset as i32
; xor rcx, rcx
);
for local in &self.locals[self.num_params..] {
let size = get_size_of_type(&local.ty)?;
dynasm!(
assembler
; sub rax, size as i32
);
if size == 4 {
dynasm!(
assembler
; mov [rax], ecx
);
} else if size == 8 {
dynasm!(
assembler
; mov [rax], rcx
);
} else {
return Err(CodegenError {
message: "unsupported size for type",
});
}
}
dynasm!(
assembler
; push rbp
; mov rbp, rsp
);
Ok(())
}
fn feed_opcode(&mut self, op: Operator) -> Result<(), CodegenError> {
@ -139,12 +203,71 @@ impl FunctionCodeGenerator for X64FunctionCode {
});
}
let local = self.locals[local_index];
dynasm!(
assembler
; mov rax, rbp
; add rax, (self.current_stack_offset - local.stack_offset) as i32
// TODO: How should we dynamically specify a register?
);
let location = self.value_stack.push(local.ty);
let size = get_size_of_type(&local.ty)?;
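// Copy the local from its rbp-relative slot into wherever the value stack
// placed the new entry: a scratch register, or a fresh slot pushed onto the
// machine stack.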
match location {
ValueLocation::Register(id) => {
if is_dword(size) {
dynasm!(
assembler
; mov Rd(Register::from_scratch_reg(id) as u8), [rbp - (local.stack_offset as i32)]
);
} else {
dynasm!(
assembler
; mov Rq(Register::from_scratch_reg(id) as u8), [rbp - (local.stack_offset as i32)]
);
}
}
ValueLocation::Stack => {
if is_dword(size) {
dynasm!(
assembler
; mov eax, [rbp - (local.stack_offset as i32)]
; sub rsp, 4
; mov [rsp], eax
);
} else {
dynasm!(
assembler
; mov rax, [rbp - (local.stack_offset as i32)]
; sub rsp, 8
; mov [rsp], rax
);
}
}
}
}
Operator::I32Add => {
let (a, b) = self.value_stack.pop2()?;
if a.ty != WpType::I32 || b.ty != WpType::I32 {
return Err(CodegenError {
message: "I32Add type mismatch",
});
}
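// Free any machine-stack slots the operands occupied, then push the result;
// with both operands in scratch registers the sum is computed in place in
// a's register.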
Self::gen_rt_pop(assembler, &b)?;
Self::gen_rt_pop(assembler, &a)?;
self.value_stack.push(WpType::I32);
if a.location.is_register() && b.location.is_register() {
let (a_reg, b_reg) = (
Register::from_scratch_reg(a.location.get_register()?),
Register::from_scratch_reg(b.location.get_register()?),
);
// output is in a_reg.
dynasm!(
assembler
; add Rd(a_reg as u8), Rd(b_reg as u8)
);
} else {
unimplemented!();
}
}
Operator::Drop => {
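// Drop discards the top of the value stack, releasing its machine-stack
// slot if it has one.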
let info = self.value_stack.pop()?;
Self::gen_rt_pop(assembler, &info)?;
}
_ => unimplemented!(),
}
@ -158,7 +281,6 @@ impl FunctionCodeGenerator for X64FunctionCode {
; => self.cleanup_label
; mov rsp, rbp
; pop rbp
; add rsp, self.current_stack_offset as i32
; ret
);
Ok(())
@ -174,3 +296,7 @@ fn get_size_of_type(ty: &WpType) -> Result<usize, CodegenError> {
}),
}
}
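// 4-byte values (i32/f32) are moved as DWORDs; 8-byte values as QWORDs.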
fn is_dword(n: usize) -> bool {
n == 4
}