Fixed linting. Added cfg-if to the binary

Author: Syrus
Date: 2020-05-18 15:47:45 -07:00
parent 3050f1fa27
commit 93038b489b
8 changed files with 33 additions and 42 deletions
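Most of the renames in this diff silence lint warnings rather than delete code: unused items get a leading underscore (which the dead_code lint skips) or an #[allow(dead_code)] attribute. A minimal standalone sketch of that convention (illustration only, names invented):

#[allow(dead_code)] // unused enum, but no dead_code warning is emitted
enum Scratch {
    A,
    B,
}

// The leading underscore marks the function as intentionally unused for now,
// so rustc's dead_code lint stays quiet without the item being removed.
fn _unused_helper(x: u32) -> u32 {
    x + 1
}

fn main() {
    // Nothing references Scratch or _unused_helper; the build is still warning-free.
}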

Cargo.lock (generated)

@@ -1600,6 +1600,7 @@ dependencies = [
"anyhow",
"atty",
"bytesize",
+ "cfg-if",
"colored",
"distance",
"glob",


@@ -39,6 +39,7 @@ structopt = { version = "0.3", features = ["suggestions"] }
distance = "0.4"
# For the inspect subcommand
bytesize = "1.0.0"
+ cfg-if = "0.1.10"
[workspace]
members = [


@@ -35,12 +35,11 @@ pub struct FuncGen<'a> {
/// Offsets of vmctx fields.
vmoffsets: &'a VMOffsets,
- // Memory plans.
- memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>,
- // Table plans.
- table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
+ // // Memory plans.
+ // memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>,
+ // // Table plans.
+ // table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
/// Function signature.
signature: FunctionType,
@@ -1195,7 +1194,7 @@ impl<'a> FuncGen<'a> {
}
/// Emits a System V call sequence, specialized for labels as the call target.
- fn emit_call_sysv_label<I: Iterator<Item = Location>>(
+ fn _emit_call_sysv_label<I: Iterator<Item = Location>>(
&mut self,
label: DynamicLabel,
params: I,
@@ -1768,8 +1767,8 @@ impl<'a> FuncGen<'a> {
module: &'a Module,
config: &'a SinglepassConfig,
vmoffsets: &'a VMOffsets,
- memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>,
- table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
+ _memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>,
+ _table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
local_func_index: LocalFunctionIndex,
local_types_excluding_arguments: &[WpType],
) -> Result<FuncGen<'a>, CodegenError> {
@@ -1797,8 +1796,8 @@ impl<'a> FuncGen<'a> {
module,
config,
vmoffsets,
- memory_plans,
- table_plans,
+ // memory_plans,
+ // table_plans,
signature,
assembler: Assembler::new().unwrap(),
locals: vec![], // initialization deferred to emit_head
@@ -8149,21 +8148,6 @@ fn type_to_wp_type(ty: Type) -> WpType {
}
}
- fn wp_type_to_type(ty: WpType) -> Result<Type, CodegenError> {
-     match ty {
-         WpType::I32 => Ok(Type::I32),
-         WpType::I64 => Ok(Type::I64),
-         WpType::F32 => Ok(Type::F32),
-         WpType::F64 => Ok(Type::F64),
-         WpType::V128 => Ok(Type::V128),
-         _ => {
-             return Err(CodegenError {
-                 message: "broken invariant, invalid type".to_string(),
-             });
-         }
-     }
- }
// FIXME: This implementation seems to be not enough to resolve all kinds of register dependencies
// at call place.
fn sort_call_movs(movs: &mut [(Location, GPR)]) {
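For context on the FIXME above (an illustration with simplified stand-in types, not the crate's real Location/GPR API): when call arguments are shuffled into registers, one move can overwrite a register that a later move still needs to read, so the moves must be ordered, and a cycle needs a scratch register.

// Simplified stand-ins for the GPRs that carry call arguments.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Reg {
    Rax,
    Rdi,
    Rsi,
}

// Naive scheduler for the acyclic case; each pair is (source, destination).
// A move is emitted only once no remaining move still reads its destination.
// A cycle such as rdi <-> rsi would still need a scratch register, which is
// roughly what the FIXME is pointing at.
fn schedule(mut movs: Vec<(Reg, Reg)>) -> Vec<(Reg, Reg)> {
    let mut ordered = Vec::new();
    while !movs.is_empty() {
        let i = movs
            .iter()
            .position(|&(_, dst)| movs.iter().all(|&(src, _)| src != dst))
            .expect("move cycle: a scratch register is required");
        ordered.push(movs.remove(i));
    }
    ordered
}

fn main() {
    // rdi must be read (into rsi) before rax overwrites it.
    let movs = vec![(Reg::Rax, Reg::Rdi), (Reg::Rdi, Reg::Rsi)];
    assert_eq!(schedule(movs), vec![(Reg::Rdi, Reg::Rsi), (Reg::Rax, Reg::Rdi)]);
}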


@@ -70,7 +70,7 @@ pub enum MachineValue {
/// Vmctx.
Vmctx,
/// Vmctx Deref.
- VmctxDeref(Vec<usize>),
+ _VmctxDeref(Vec<usize>),
/// Preserve Register.
PreserveRegister(RegisterIndex),
/// Copy Stack BP Relative.
@@ -82,7 +82,7 @@ pub enum MachineValue {
/// Wasm Local.
WasmLocal(usize),
/// Two Halves.
- TwoHalves(Box<(MachineValue, MachineValue)>), // 32-bit values. TODO: optimize: add another type for inner "half" value to avoid boxing?
+ _TwoHalves(Box<(MachineValue, MachineValue)>), // 32-bit values. TODO: optimize: add another type for inner "half" value to avoid boxing?
}
/// A map of function states.
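One possible reading of the TODO on TwoHalves above (purely a sketch, not code from this repository): a separate, non-recursive "half" type would give the two 32-bit parts a fixed size, so they could be stored inline instead of behind a Box.

// Hypothetical inner type (variant names invented for illustration): only the
// shapes a 32-bit half can take, so it has a known size without indirection.
#[allow(dead_code)]
enum HalfValue {
    Undefined,
    PreserveRegister(usize),
    WasmStack(usize),
    WasmLocal(usize),
}

// TwoHalves could then hold its parts inline rather than as
// Box<(MachineValue, MachineValue)>.
#[allow(dead_code)]
enum MachineValueSketch {
    TwoHalves(HalfValue, HalfValue),
}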
@@ -114,7 +114,7 @@ pub struct FunctionStateMap {
#[derive(Clone, Copy, Debug)]
pub enum SuspendOffset {
/// A loop.
- Loop(usize),
+ _Loop(usize),
/// A call.
Call(usize),
/// A trappable.
@@ -220,7 +220,7 @@ impl MachineState {
impl MachineStateDiff {
/// Creates a `MachineState` from the given `&FunctionStateMap`.
- pub fn build_state(&self, m: &FunctionStateMap) -> MachineState {
+ pub fn _build_state(&self, m: &FunctionStateMap) -> MachineState {
let mut chain: Vec<&MachineStateDiff> = vec![];
chain.push(self);
let mut current = self.last;


@@ -14,7 +14,7 @@ pub enum Location {
Imm8(u8),
Imm32(u32),
Imm64(u64),
- Imm128(u128),
+ // Imm128(u128),
GPR(GPR),
XMM(XMM),
Memory(GPR, i32),


@@ -45,6 +45,7 @@ pub enum GPR {
/// XMM registers.
#[repr(u8)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
+ #[allow(dead_code)]
pub enum XMM {
/// XMM register 0
XMM0,
@@ -98,8 +99,8 @@ impl X64Register {
}
}
- /// Converts a DWARD regnum to X64Register.
- pub fn from_dwarf_regnum(x: u16) -> Option<X64Register> {
+ /// Converts a DWARF regnum to X64Register.
+ pub fn _from_dwarf_regnum(x: u16) -> Option<X64Register> {
Some(match x {
0 => X64Register::GPR(GPR::RAX),
1 => X64Register::GPR(GPR::RDX),
@@ -134,7 +135,7 @@ impl X64Register {
///
/// To build an instruction, append the memory location as a 32-bit
/// offset to the stack pointer to this prefix.
- pub fn prefix_mov_to_stack(&self) -> Option<&'static [u8]> {
+ pub fn _prefix_mov_to_stack(&self) -> Option<&'static [u8]> {
Some(match *self {
X64Register::GPR(gpr) => match gpr {
GPR::RDI => &[0x48, 0x89, 0xbc, 0x24],
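Background on the byte prefix shown for GPR::RDI (an aside, not part of the diff): 0x48 is the REX.W prefix, 0x89 is the MOV r/m64, r64 opcode, and 0xbc 0x24 is a ModRM/SIB pair selecting [rsp + disp32], so appending a little-endian 32-bit offset completes a store of RDI to the stack. A small sketch of that assembly step:

// Sketch only: completing the 4-byte prefix into a full
// `mov [rsp + offset], rdi` by appending the 32-bit displacement.
fn mov_rdi_to_stack(offset: u32) -> Vec<u8> {
    let mut insn = vec![
        0x48, // REX.W: 64-bit operand size
        0x89, // MOV r/m64, r64 (store register to memory)
        0xbc, // ModRM: mod=10 (disp32), reg=rdi, rm=100 (SIB byte follows)
        0x24, // SIB: base=rsp, no index
    ];
    insn.extend_from_slice(&offset.to_le_bytes()); // little-endian displacement
    insn
}

fn main() {
    assert_eq!(mov_rdi_to_stack(8), [0x48, 0x89, 0xbc, 0x24, 8, 0, 0, 0]);
}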


@@ -6,7 +6,7 @@ use std::sync::{Arc, Mutex};
use wasm_common::entity::PrimaryMap;
use wasm_common::{FunctionIndex, FunctionType, LocalFunctionIndex, SignatureIndex};
use wasmer_compiler::{
- CompileError, CustomSection, CustomSectionProtection, FunctionBody, SectionIndex, Target,
+ CompileError, CustomSection, CustomSectionProtection, FunctionBody, SectionIndex,
};
#[cfg(feature = "compiler")]
use wasmer_compiler::{Compiler, CompilerConfig};


@@ -81,14 +81,18 @@ impl StoreOptions {
Compiler::from_str(&backend)
} else {
// Auto mode, we choose the best compiler for that platform
- if cfg!(feature = "cranelift") && cfg!(target_arch = "x86_64") {
-     Ok(Compiler::Cranelift)
- } else if cfg!(feature = "singlepass") && cfg!(target_arch = "x86_64") {
-     Ok(Compiler::Singlepass)
- } else if cfg!(feature = "llvm") {
-     Ok(Compiler::LLVM)
+ cfg_if::cfg_if! {
+     if #[cfg(all(feature = "cranelift", target_arch = "x86_64"))] {
+         return Ok(Compiler::Cranelift);
+     }
+     else if #[cfg(all(feature = "singlepass", target_arch = "x86_64"))] {
+         return Ok(Compiler::Singlepass);
+     }
+     else if #[cfg(feature = "llvm")] {
+         return Ok(Compiler::LLVM);
} else {
- bail!("There are no available compilers for your architecture")
+ bail!("There are no available compilers for your architecture");
}
+ }
}
}
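The last hunk is why cfg-if is now a dependency of the binary. With a runtime cfg!(...) chain, every branch is still compiled and type-checked even when its feature is disabled; cfg_if! wraps each branch in the matching #[cfg(...)] attribute, so disabled branches are dropped before compilation. A minimal standalone sketch of that idea, using the crate's documented item-level form and an invented selection rule:

// Only one of these definitions survives compilation; the other is cfg'd out.
cfg_if::cfg_if! {
    if #[cfg(target_arch = "x86_64")] {
        fn auto_backend() -> &'static str { "cranelift" } // hypothetical default
    } else {
        fn auto_backend() -> &'static str { "singlepass" } // hypothetical fallback
    }
}

fn main() {
    // Requires `cfg-if` in Cargo.toml, as added in this commit.
    println!("auto-selected backend: {}", auto_backend());
}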