Starting to port singlepass.

# Conflicts:
#	Cargo.lock
losfair
2020-04-25 02:00:41 +08:00
committed by Syrus
parent dff7cf28c9
commit 7911986cbe
7 changed files with 13163 additions and 1 deletion


@@ -19,6 +19,11 @@ wasm-common = { path = "../wasm-common", version = "0.16.2", default-features =
rayon = "1.3.0" rayon = "1.3.0"
serde = { version = "1.0.106", features = ["derive"] } serde = { version = "1.0.106", features = ["derive"] }
more-asserts = "0.2.1" more-asserts = "0.2.1"
dynasm = "0.5"
dynasmrt = "0.5"
lazy_static = "1.4"
byteorder = "1.3"
smallvec = "1"
[badges] [badges]
maintenance = { status = "actively-developed" } maintenance = { status = "actively-developed" }
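
The new dependencies are the heart of the port: dynasm and dynasmrt provide the run-time x86-64 assembler that the single-pass code generator emits into, and at 0.5 they need a nightly toolchain, which is why lib.rs below enables `proc_macro_hygiene`. A minimal sketch of that workflow (not taken from this commit; assumes dynasm 0.5's proc-macro interface on nightly):

#![feature(proc_macro_hygiene)]

use dynasm::dynasm;
use dynasmrt::{x64::Assembler, DynasmApi};

fn main() {
    // Assemble `mov rax, 42; ret` into an executable buffer.
    let mut ops = Assembler::new().unwrap();
    let start = ops.offset();
    dynasm!(ops
        ; mov rax, 42
        ; ret
    );
    let buf = ops.finalize().unwrap();
    // Safety: `buf` now holds valid machine code for a zero-argument function.
    let f: extern "C" fn() -> u64 = unsafe { std::mem::transmute(buf.ptr(start)) };
    assert_eq!(f(), 42);
}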

File diff suppressed because it is too large.


@@ -0,0 +1,155 @@
use std::collections::BTreeMap;
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct RegisterIndex(pub usize);
/// Information about an inline breakpoint.
///
/// TODO: Move this into runtime.
#[derive(Clone, Debug)]
pub struct InlineBreakpoint {
/// Size in bytes taken by this breakpoint's instruction sequence.
pub size: usize,
/// Type of the inline breakpoint.
pub ty: InlineBreakpointType,
}
/// The type of an inline breakpoint.
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
pub enum InlineBreakpointType {
/// A middleware invocation breakpoint.
Middleware,
}
/// A wasm value, either computed at runtime or a known constant.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum WasmAbstractValue {
/// A wasm runtime value
Runtime,
/// A wasm constant value
Const(u64),
}
/// A container for the state of a running wasm instance.
#[derive(Clone, Debug)]
pub struct MachineState {
/// Stack values.
pub stack_values: Vec<MachineValue>,
/// Register values.
pub register_values: Vec<MachineValue>,
/// Previous frame.
pub prev_frame: BTreeMap<usize, MachineValue>,
/// Wasm stack.
pub wasm_stack: Vec<WasmAbstractValue>,
/// Private depth of the wasm stack.
pub wasm_stack_private_depth: usize,
/// Wasm instruction offset.
pub wasm_inst_offset: usize,
}
/// A diff of two `MachineState`s.
#[derive(Clone, Debug, Default)]
pub struct MachineStateDiff {
/// Index of the previous diff this one builds on, if any.
pub last: Option<usize>,
/// Stack push.
pub stack_push: Vec<MachineValue>,
/// Stack pop.
pub stack_pop: usize,
/// Register diff.
pub reg_diff: Vec<(RegisterIndex, MachineValue)>,
/// Previous frame diff.
pub prev_frame_diff: BTreeMap<usize, Option<MachineValue>>, // None for removal
/// Wasm stack push.
pub wasm_stack_push: Vec<WasmAbstractValue>,
/// Wasm stack pop.
pub wasm_stack_pop: usize,
/// Private depth of the wasm stack.
pub wasm_stack_private_depth: usize, // absolute value; not a diff.
/// Wasm instruction offset.
pub wasm_inst_offset: usize, // absolute value; not a diff.
}
/// A kind of machine value.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum MachineValue {
/// Undefined.
Undefined,
/// Vmctx.
Vmctx,
/// Vmctx Deref.
VmctxDeref(Vec<usize>),
/// Preserve Register.
PreserveRegister(RegisterIndex),
/// Copy Stack BP Relative.
CopyStackBPRelative(i32), // relative to Base Pointer, in byte offset
/// Explicit Shadow.
ExplicitShadow, // indicates that all values above this are above the shadow region
/// Wasm Stack.
WasmStack(usize),
/// Wasm Local.
WasmLocal(usize),
/// Two Halves.
TwoHalves(Box<(MachineValue, MachineValue)>), // 32-bit values. TODO: optimize: add another type for inner "half" value to avoid boxing?
}
/// A map of function states.
#[derive(Clone, Debug)]
pub struct FunctionStateMap {
/// Initial.
pub initial: MachineState,
/// Local Function Id.
pub local_function_id: usize,
/// Locals.
pub locals: Vec<WasmAbstractValue>,
/// Shadow size.
pub shadow_size: usize, // for single-pass backend, 32 bytes on x86-64
/// Diffs.
pub diffs: Vec<MachineStateDiff>,
/// Wasm Function Header target offset.
pub wasm_function_header_target_offset: Option<SuspendOffset>,
/// Wasm offset to target offset
pub wasm_offset_to_target_offset: BTreeMap<usize, SuspendOffset>,
/// Loop offsets.
pub loop_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
/// Call offsets.
pub call_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
/// Trappable offsets.
pub trappable_offsets: BTreeMap<usize, OffsetInfo>, /* suspend_offset -> info */
}
/// A kind of suspend offset.
#[derive(Clone, Copy, Debug)]
pub enum SuspendOffset {
/// A loop.
Loop(usize),
/// A call.
Call(usize),
/// A trappable.
Trappable(usize),
}
/// Info for an offset.
#[derive(Clone, Debug)]
pub struct OffsetInfo {
/// End offset.
pub end_offset: usize, // excluded bound
/// Diff Id.
pub diff_id: usize,
/// Activate offset.
pub activate_offset: usize,
}
/// A map of module state.
#[derive(Clone, Debug)]
pub struct ModuleStateMap {
/// Local functions.
pub local_functions: BTreeMap<usize, FunctionStateMap>,
/// Total size.
pub total_size: usize,
}
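
The diff structures above are deltas rather than snapshots: each MachineStateDiff records what changed relative to the diff named by `last`, so the full MachineState at a suspend point can be rebuilt by replaying the chain. A hypothetical replay helper, purely illustrative and not part of this commit:

fn apply_diff(base: &MachineState, diff: &MachineStateDiff) -> MachineState {
    let mut s = base.clone();
    // Machine stack: pop first, then push the new values.
    for _ in 0..diff.stack_pop {
        s.stack_values.pop();
    }
    s.stack_values.extend(diff.stack_push.iter().cloned());
    // Registers: sparse overwrites.
    for (idx, v) in &diff.reg_diff {
        s.register_values[idx.0] = v.clone();
    }
    // Previous frame: `None` marks a removal.
    for (k, v) in &diff.prev_frame_diff {
        match v {
            Some(v) => {
                s.prev_frame.insert(*k, v.clone());
            }
            None => {
                s.prev_frame.remove(k);
            }
        }
    }
    // Wasm stack: same pop-then-push discipline.
    for _ in 0..diff.wasm_stack_pop {
        s.wasm_stack.pop();
    }
    s.wasm_stack.extend(diff.wasm_stack_push.iter().cloned());
    // These two fields are absolute values, not diffs (see the comments above).
    s.wasm_stack_private_depth = diff.wasm_stack_private_depth;
    s.wasm_inst_offset = diff.wasm_inst_offset;
    s
}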

File diff suppressed because it is too large.


@@ -5,11 +5,20 @@
 //! including Blockchains and Edge computing where quick compilation
 //! times are a must, and JIT bombs should never happen.
 //!
-//! Compared to Cranelift and LLVM, Singlepass is much faster to compile.
+//! Compared to Cranelift and LLVM, Singlepass compiles much faster but has worse
+//! runtime performance.
+//!
 //! > Note: Singlepass currently depends on Rust nightly features.
 
+#![feature(proc_macro_hygiene)]
+
 mod compiler;
 mod config;
+//mod codegen_x64;
+mod common_decl;
+mod emitter_x64;
+mod machine;
+mod x64_decl;
 
 pub use crate::compiler::SinglepassCompiler;
 pub use crate::config::SinglepassConfig;


@@ -0,0 +1,513 @@
use crate::common_decl::*;
use crate::emitter_x64::*;
use crate::x64_decl::{new_machine_state, X64Register};
use smallvec::smallvec;
use smallvec::SmallVec;
use std::collections::HashSet;
use wasmparser::Type as WpType;
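/// Bytes of stack space currently allocated below the frame pointer for
/// spilled values and saved registers.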
struct MachineStackOffset(usize);
pub struct Machine {
used_gprs: HashSet<GPR>,
used_xmms: HashSet<XMM>,
stack_offset: MachineStackOffset,
save_area_offset: Option<MachineStackOffset>,
pub state: MachineState,
pub(crate) track_state: bool,
}
impl Machine {
pub fn new() -> Self {
Machine {
used_gprs: HashSet::new(),
used_xmms: HashSet::new(),
stack_offset: MachineStackOffset(0),
save_area_offset: None,
state: new_machine_state(),
track_state: true,
}
}
pub fn get_stack_offset(&self) -> usize {
self.stack_offset.0
}
pub fn get_used_gprs(&self) -> Vec<GPR> {
self.used_gprs.iter().cloned().collect()
}
pub fn get_used_xmms(&self) -> Vec<XMM> {
self.used_xmms.iter().cloned().collect()
}
pub fn get_vmctx_reg() -> GPR {
GPR::R15
}
/// Picks an unused general purpose register for local/stack/argument use.
///
/// This method does not mark the register as used.
pub fn pick_gpr(&self) -> Option<GPR> {
use GPR::*;
static REGS: &'static [GPR] = &[RSI, RDI, R8, R9, R10, R11];
for r in REGS {
if !self.used_gprs.contains(r) {
return Some(*r);
}
}
None
}
/// Picks an unused general purpose register for internal temporary use.
///
/// This method does not mark the register as used.
pub fn pick_temp_gpr(&self) -> Option<GPR> {
use GPR::*;
static REGS: &'static [GPR] = &[RAX, RCX, RDX];
for r in REGS {
if !self.used_gprs.contains(r) {
return Some(*r);
}
}
None
}
/// Acquires a temporary GPR.
pub fn acquire_temp_gpr(&mut self) -> Option<GPR> {
let gpr = self.pick_temp_gpr();
if let Some(x) = gpr {
self.used_gprs.insert(x);
}
gpr
}
/// Releases a temporary GPR.
pub fn release_temp_gpr(&mut self, gpr: GPR) {
assert!(self.used_gprs.remove(&gpr));
}
/// Specify that a given register is in use.
pub fn reserve_unused_temp_gpr(&mut self, gpr: GPR) -> GPR {
assert!(!self.used_gprs.contains(&gpr));
self.used_gprs.insert(gpr);
gpr
}
/// Picks an unused XMM register.
///
/// This method does not mark the register as used.
pub fn pick_xmm(&self) -> Option<XMM> {
use XMM::*;
static REGS: &'static [XMM] = &[XMM3, XMM4, XMM5, XMM6, XMM7];
for r in REGS {
if !self.used_xmms.contains(r) {
return Some(*r);
}
}
None
}
/// Picks an unused XMM register for internal temporary use.
///
/// This method does not mark the register as used.
pub fn pick_temp_xmm(&self) -> Option<XMM> {
use XMM::*;
static REGS: &'static [XMM] = &[XMM0, XMM1, XMM2];
for r in REGS {
if !self.used_xmms.contains(r) {
return Some(*r);
}
}
None
}
/// Acquires a temporary XMM register.
pub fn acquire_temp_xmm(&mut self) -> Option<XMM> {
let xmm = self.pick_temp_xmm();
if let Some(x) = xmm {
self.used_xmms.insert(x);
}
xmm
}
/// Releases a temporary XMM register.
pub fn release_temp_xmm(&mut self, xmm: XMM) {
assert!(self.used_xmms.remove(&xmm));
}
/// Acquires locations from the machine state.
///
/// If the returned locations are used for a stack value, `release_locations` needs to be
/// called on them; if they are used for locals, it does not.
pub fn acquire_locations<E: Emitter>(
&mut self,
assembler: &mut E,
tys: &[(WpType, MachineValue)],
zeroed: bool,
) -> SmallVec<[Location; 1]> {
let mut ret = smallvec![];
let mut delta_stack_offset: usize = 0;
for (ty, mv) in tys {
let loc = match *ty {
WpType::F32 | WpType::F64 => self.pick_xmm().map(Location::XMM),
WpType::I32 | WpType::I64 => self.pick_gpr().map(Location::GPR),
_ => unreachable!(),
};
let loc = if let Some(x) = loc {
x
} else {
self.stack_offset.0 += 8;
delta_stack_offset += 8;
Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32))
};
if let Location::GPR(x) = loc {
self.used_gprs.insert(x);
self.state.register_values[X64Register::GPR(x).to_index().0] = mv.clone();
} else if let Location::XMM(x) = loc {
self.used_xmms.insert(x);
self.state.register_values[X64Register::XMM(x).to_index().0] = mv.clone();
} else {
self.state.stack_values.push(mv.clone());
}
self.state.wasm_stack.push(WasmAbstractValue::Runtime);
ret.push(loc);
}
if delta_stack_offset != 0 {
assembler.emit_sub(
Size::S64,
Location::Imm32(delta_stack_offset as u32),
Location::GPR(GPR::RSP),
);
}
if zeroed {
for i in 0..tys.len() {
assembler.emit_mov(Size::S64, Location::Imm32(0), ret[i]);
}
}
ret
}
/// Releases locations used for stack value.
pub fn release_locations<E: Emitter>(&mut self, assembler: &mut E, locs: &[Location]) {
let mut delta_stack_offset: usize = 0;
for loc in locs.iter().rev() {
match *loc {
Location::GPR(ref x) => {
assert!(self.used_gprs.remove(x));
self.state.register_values[X64Register::GPR(*x).to_index().0] =
MachineValue::Undefined;
}
Location::XMM(ref x) => {
assert!(self.used_xmms.remove(x));
self.state.register_values[X64Register::XMM(*x).to_index().0] =
MachineValue::Undefined;
}
Location::Memory(GPR::RBP, x) => {
if x >= 0 {
unreachable!();
}
let offset = (-x) as usize;
if offset != self.stack_offset.0 {
unreachable!();
}
self.stack_offset.0 -= 8;
delta_stack_offset += 8;
self.state.stack_values.pop().unwrap();
}
_ => {}
}
self.state.wasm_stack.pop().unwrap();
}
if delta_stack_offset != 0 {
assembler.emit_add(
Size::S64,
Location::Imm32(delta_stack_offset as u32),
Location::GPR(GPR::RSP),
);
}
}
pub fn release_locations_only_regs(&mut self, locs: &[Location]) {
for loc in locs.iter().rev() {
match *loc {
Location::GPR(ref x) => {
assert!(self.used_gprs.remove(x));
self.state.register_values[X64Register::GPR(*x).to_index().0] =
MachineValue::Undefined;
}
Location::XMM(ref x) => {
assert!(self.used_xmms.remove(x));
self.state.register_values[X64Register::XMM(*x).to_index().0] =
MachineValue::Undefined;
}
_ => {}
}
// Wasm state popping is deferred to `release_locations_only_osr_state`.
}
}
pub fn release_locations_only_stack<E: Emitter>(
&mut self,
assembler: &mut E,
locs: &[Location],
) {
let mut delta_stack_offset: usize = 0;
for loc in locs.iter().rev() {
match *loc {
Location::Memory(GPR::RBP, x) => {
if x >= 0 {
unreachable!();
}
let offset = (-x) as usize;
if offset != self.stack_offset.0 {
unreachable!();
}
self.stack_offset.0 -= 8;
delta_stack_offset += 8;
self.state.stack_values.pop().unwrap();
}
_ => {}
}
// Wasm state popping is deferred to `release_locations_only_osr_state`.
}
if delta_stack_offset != 0 {
assembler.emit_add(
Size::S64,
Location::Imm32(delta_stack_offset as u32),
Location::GPR(GPR::RSP),
);
}
}
pub fn release_locations_only_osr_state(&mut self, n: usize) {
for _ in 0..n {
self.state.wasm_stack.pop().unwrap();
}
}
pub fn release_locations_keep_state<E: Emitter>(&self, assembler: &mut E, locs: &[Location]) {
let mut delta_stack_offset: usize = 0;
let mut stack_offset = self.stack_offset.0;
for loc in locs.iter().rev() {
match *loc {
Location::Memory(GPR::RBP, x) => {
if x >= 0 {
unreachable!();
}
let offset = (-x) as usize;
if offset != stack_offset {
unreachable!();
}
stack_offset -= 8;
delta_stack_offset += 8;
}
_ => {}
}
}
if delta_stack_offset != 0 {
assembler.emit_add(
Size::S64,
Location::Imm32(delta_stack_offset as u32),
Location::GPR(GPR::RSP),
);
}
}
pub fn init_locals<E: Emitter>(
&mut self,
a: &mut E,
n: usize,
n_params: usize,
) -> Vec<Location> {
// Use callee-saved registers for locals.
fn get_local_location(idx: usize) -> Location {
match idx {
0 => Location::GPR(GPR::R12),
1 => Location::GPR(GPR::R13),
2 => Location::GPR(GPR::R14),
3 => Location::GPR(GPR::RBX),
_ => Location::Memory(GPR::RBP, -(((idx - 3) * 8) as i32)),
}
}
let mut locations: Vec<Location> = vec![];
let mut allocated: usize = 0;
// Determine locations for parameters.
for i in 0..n_params {
let loc = Self::get_param_location(i + 1);
locations.push(match loc {
Location::GPR(_) => {
let old_idx = allocated;
allocated += 1;
get_local_location(old_idx)
}
Location::Memory(_, _) => {
let old_idx = allocated;
allocated += 1;
get_local_location(old_idx)
}
_ => unreachable!(),
});
}
// Determine locations for normal locals.
for _ in n_params..n {
locations.push(get_local_location(allocated));
allocated += 1;
}
for (i, loc) in locations.iter().enumerate() {
match *loc {
Location::GPR(x) => {
self.state.register_values[X64Register::GPR(x).to_index().0] =
MachineValue::WasmLocal(i);
}
Location::Memory(_, _) => {
self.state.stack_values.push(MachineValue::WasmLocal(i));
}
_ => unreachable!(),
}
}
// How many machine stack slots did all the locals use?
let num_mem_slots = locations
.iter()
.filter(|&&loc| match loc {
Location::Memory(_, _) => true,
_ => false,
})
.count();
// Move RSP down to reserve space for machine stack slots.
if num_mem_slots > 0 {
a.emit_sub(
Size::S64,
Location::Imm32((num_mem_slots * 8) as u32),
Location::GPR(GPR::RSP),
);
self.stack_offset.0 += num_mem_slots * 8;
}
// Save callee-saved registers.
for loc in locations.iter() {
if let Location::GPR(x) = *loc {
a.emit_push(Size::S64, *loc);
self.stack_offset.0 += 8;
self.state.stack_values.push(MachineValue::PreserveRegister(
X64Register::GPR(x).to_index(),
));
}
}
// Save R15 for vmctx use.
a.emit_push(Size::S64, Location::GPR(GPR::R15));
self.stack_offset.0 += 8;
self.state.stack_values.push(MachineValue::PreserveRegister(
X64Register::GPR(GPR::R15).to_index(),
));
// Save the offset of static area.
self.save_area_offset = Some(MachineStackOffset(self.stack_offset.0));
// Load in-register parameters into the allocated locations.
for i in 0..n_params {
let loc = Self::get_param_location(i + 1);
match loc {
Location::GPR(_) => {
a.emit_mov(Size::S64, loc, locations[i]);
}
Location::Memory(_, _) => match locations[i] {
Location::GPR(_) => {
a.emit_mov(Size::S64, loc, locations[i]);
}
Location::Memory(_, _) => {
a.emit_mov(Size::S64, loc, Location::GPR(GPR::RAX));
a.emit_mov(Size::S64, Location::GPR(GPR::RAX), locations[i]);
}
_ => unreachable!(),
},
_ => unreachable!(),
}
}
// Load vmctx.
a.emit_mov(
Size::S64,
Self::get_param_location(0),
Location::GPR(GPR::R15),
);
// Initialize all normal locals to zero.
for i in n_params..n {
a.emit_mov(Size::S64, Location::Imm32(0), locations[i]);
}
locations
}
pub fn finalize_locals<E: Emitter>(&mut self, a: &mut E, locations: &[Location]) {
// Unwind stack to the "save area".
a.emit_lea(
Size::S64,
Location::Memory(
GPR::RBP,
-(self.save_area_offset.as_ref().unwrap().0 as i32),
),
Location::GPR(GPR::RSP),
);
// Restore R15 used by vmctx.
a.emit_pop(Size::S64, Location::GPR(GPR::R15));
// Restore callee-saved registers.
for loc in locations.iter().rev() {
if let Location::GPR(_) = *loc {
a.emit_pop(Size::S64, *loc);
}
}
}
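/// Parameter locations follow the System V AMD64 integer-argument order
/// (RDI, RSI, RDX, RCX, R8, R9, then the caller's stack). Index 0 carries
/// the vmctx pointer, so wasm parameters start at index 1 (see `init_locals`).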
pub fn get_param_location(idx: usize) -> Location {
match idx {
0 => Location::GPR(GPR::RDI),
1 => Location::GPR(GPR::RSI),
2 => Location::GPR(GPR::RDX),
3 => Location::GPR(GPR::RCX),
4 => Location::GPR(GPR::R8),
5 => Location::GPR(GPR::R9),
_ => Location::Memory(GPR::RBP, (16 + (idx - 6) * 8) as i32),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use dynasmrt::x64::Assembler;
#[test]
fn test_release_locations_keep_state_nopanic() {
let mut machine = Machine::new();
let mut assembler = Assembler::new().unwrap();
let locs = machine.acquire_locations(
&mut assembler,
&(0..10)
.map(|_| (WpType::I32, MachineValue::Undefined))
.collect::<Vec<_>>(),
false,
);
machine.release_locations_keep_state(&mut assembler, &locs);
}
}
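
A sketch of the temporary-register discipline the emitter is expected to follow around the pools above (the actual instruction emission is elided):

fn example(machine: &mut Machine) {
    // Temporaries come from the RAX/RCX/RDX pool and must be released.
    let tmp = machine
        .acquire_temp_gpr()
        .expect("one of RAX/RCX/RDX is free");
    // ... emit instructions that use `tmp` ...
    machine.release_temp_gpr(tmp); // panics if `tmp` was not marked as used
}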


@@ -0,0 +1,222 @@
//! X64 structures.
use crate::common_decl::{MachineState, MachineValue, RegisterIndex};
use std::collections::BTreeMap;
use wasm_common::Type;
/// General-purpose registers.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum GPR {
/// RAX register
RAX,
/// RCX register
RCX,
/// RDX register
RDX,
/// RBX register
RBX,
/// RSP register
RSP,
/// RBP register
RBP,
/// RSI register
RSI,
/// RDI register
RDI,
/// R8 register
R8,
/// R9 register
R9,
/// R10 register
R10,
/// R11 register
R11,
/// R12 register
R12,
/// R13 register
R13,
/// R14 register
R14,
/// R15 register
R15,
}
/// XMM registers.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum XMM {
/// XMM register 0
XMM0,
/// XMM register 1
XMM1,
/// XMM register 2
XMM2,
/// XMM register 3
XMM3,
/// XMM register 4
XMM4,
/// XMM register 5
XMM5,
/// XMM register 6
XMM6,
/// XMM register 7
XMM7,
/// XMM register 8
XMM8,
/// XMM register 9
XMM9,
/// XMM register 10
XMM10,
/// XMM register 11
XMM11,
/// XMM register 12
XMM12,
/// XMM register 13
XMM13,
/// XMM register 14
XMM14,
/// XMM register 15
XMM15,
}
/// A machine register under the x86-64 architecture.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum X64Register {
/// General-purpose registers.
GPR(GPR),
/// XMM (floating point/SIMD) registers.
XMM(XMM),
}
impl X64Register {
/// Returns the index of the register.
pub fn to_index(&self) -> RegisterIndex {
match *self {
X64Register::GPR(x) => RegisterIndex(x as usize),
X64Register::XMM(x) => RegisterIndex(x as usize + 16),
}
}
/// Converts a DWARF register number to an `X64Register`.
pub fn from_dwarf_regnum(x: u16) -> Option<X64Register> {
Some(match x {
0 => X64Register::GPR(GPR::RAX),
1 => X64Register::GPR(GPR::RDX),
2 => X64Register::GPR(GPR::RCX),
3 => X64Register::GPR(GPR::RBX),
4 => X64Register::GPR(GPR::RSI),
5 => X64Register::GPR(GPR::RDI),
6 => X64Register::GPR(GPR::RBP),
7 => X64Register::GPR(GPR::RSP),
8 => X64Register::GPR(GPR::R8),
9 => X64Register::GPR(GPR::R9),
10 => X64Register::GPR(GPR::R10),
11 => X64Register::GPR(GPR::R11),
12 => X64Register::GPR(GPR::R12),
13 => X64Register::GPR(GPR::R13),
14 => X64Register::GPR(GPR::R14),
15 => X64Register::GPR(GPR::R15),
17 => X64Register::XMM(XMM::XMM0),
18 => X64Register::XMM(XMM::XMM1),
19 => X64Register::XMM(XMM::XMM2),
20 => X64Register::XMM(XMM::XMM3),
21 => X64Register::XMM(XMM::XMM4),
22 => X64Register::XMM(XMM::XMM5),
23 => X64Register::XMM(XMM::XMM6),
24 => X64Register::XMM(XMM::XMM7),
_ => return None,
})
}
/// Returns the instruction prefix for `movq %this_reg, ?(%rsp)`.
///
/// To build an instruction, append the memory location as a 32-bit
/// offset to the stack pointer to this prefix.
pub fn prefix_mov_to_stack(&self) -> Option<&'static [u8]> {
Some(match *self {
X64Register::GPR(gpr) => match gpr {
GPR::RDI => &[0x48, 0x89, 0xbc, 0x24],
GPR::RSI => &[0x48, 0x89, 0xb4, 0x24],
GPR::RDX => &[0x48, 0x89, 0x94, 0x24],
GPR::RCX => &[0x48, 0x89, 0x8c, 0x24],
GPR::R8 => &[0x4c, 0x89, 0x84, 0x24],
GPR::R9 => &[0x4c, 0x89, 0x8c, 0x24],
_ => return None,
},
X64Register::XMM(xmm) => match xmm {
XMM::XMM0 => &[0x66, 0x0f, 0xd6, 0x84, 0x24],
XMM::XMM1 => &[0x66, 0x0f, 0xd6, 0x8c, 0x24],
XMM::XMM2 => &[0x66, 0x0f, 0xd6, 0x94, 0x24],
XMM::XMM3 => &[0x66, 0x0f, 0xd6, 0x9c, 0x24],
XMM::XMM4 => &[0x66, 0x0f, 0xd6, 0xa4, 0x24],
XMM::XMM5 => &[0x66, 0x0f, 0xd6, 0xac, 0x24],
XMM::XMM6 => &[0x66, 0x0f, 0xd6, 0xb4, 0x24],
XMM::XMM7 => &[0x66, 0x0f, 0xd6, 0xbc, 0x24],
_ => return None,
},
})
}
}
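// Illustration (not part of this file): `prefix_mov_to_stack` returns every
// byte up to the displacement, so a caller completes the instruction by
// appending a little-endian 32-bit offset, e.g. for `mov [rsp + 0x20], rdi`:
//
//     let mut code: Vec<u8> = Vec::new();
//     code.extend_from_slice(
//         X64Register::GPR(GPR::RDI).prefix_mov_to_stack().unwrap(),
//     );
//     code.extend_from_slice(&0x20i32.to_le_bytes()); // disp32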
/// An allocator that allocates registers for function arguments according to the System V ABI.
#[derive(Default)]
pub struct ArgumentRegisterAllocator {
n_gprs: usize,
n_xmms: usize,
}
impl ArgumentRegisterAllocator {
/// Allocates a register for argument type `ty`. Returns `None` if no register is available for this type.
pub fn next(&mut self, ty: Type) -> Option<X64Register> {
static GPR_SEQ: &'static [GPR] =
&[GPR::RDI, GPR::RSI, GPR::RDX, GPR::RCX, GPR::R8, GPR::R9];
static XMM_SEQ: &'static [XMM] = &[
XMM::XMM0,
XMM::XMM1,
XMM::XMM2,
XMM::XMM3,
XMM::XMM4,
XMM::XMM5,
XMM::XMM6,
XMM::XMM7,
];
match ty {
Type::I32 | Type::I64 => {
if self.n_gprs < GPR_SEQ.len() {
let gpr = GPR_SEQ[self.n_gprs];
self.n_gprs += 1;
Some(X64Register::GPR(gpr))
} else {
None
}
}
Type::F32 | Type::F64 => {
if self.n_xmms < XMM_SEQ.len() {
let xmm = XMM_SEQ[self.n_xmms];
self.n_xmms += 1;
Some(X64Register::XMM(xmm))
} else {
None
}
}
_ => todo!(
"ArgumentRegisterAllocator::next: Unsupported type: {:?}",
ty
),
}
}
}
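// Illustration (not part of this file): the integer and floating-point
// sequences advance independently, so GPR and XMM arguments never consume
// each other's slots:
//
//     let mut regs = ArgumentRegisterAllocator::default();
//     assert_eq!(regs.next(Type::I32), Some(X64Register::GPR(GPR::RDI)));
//     assert_eq!(regs.next(Type::F64), Some(X64Register::XMM(XMM::XMM0)));
//     assert_eq!(regs.next(Type::I64), Some(X64Register::GPR(GPR::RSI)));
//
// After six integer or eight float arguments, `next` returns `None` and the
// argument is passed on the stack instead.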
/// Create a new `MachineState` with default values.
pub fn new_machine_state() -> MachineState {
MachineState {
stack_values: vec![],
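// 16 GPRs plus 8 XMM slots; only XMM0-XMM7 get state slots
// (X64Register::to_index maps XMMn to 16 + n).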
register_values: vec![MachineValue::Undefined; 16 + 8],
prev_frame: BTreeMap::new(),
wasm_stack: vec![],
wasm_stack_private_depth: 0,
wasm_inst_offset: std::usize::MAX,
}
}