Remove #![allow(unused_imports, dead_code)] and fix resulting issues.

Fix nearly all issues found by clippy too.
This commit is contained in:
Nick Lewycky
2020-05-20 17:16:56 -07:00
parent df2a45899a
commit a47a3068f4
7 changed files with 173 additions and 289 deletions
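Most of the clippy fixes below are mechanical. The most frequent one replaces an eager ok_or with a lazy ok_or_else, so the error String is only allocated on the None path (clippy's or_fun_call lint). A minimal sketch with hypothetical helpers, not code from this diff:

// ok_or evaluates its argument eagerly: the error String is built
// even when the Option is Some.
fn current_block_eager(block: Option<u32>) -> Result<u32, String> {
    block.ok_or("not currently in a block".to_string())
}

// ok_or_else takes a closure, deferring the allocation to the error path.
fn current_block_lazy(block: Option<u32>) -> Result<u32, String> {
    block.ok_or_else(|| "not currently in a block".to_string())
}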


@@ -1,7 +1,3 @@
//! Support for compiling with LLVM.
// Allow unused imports while developing.
#![allow(unused_imports, dead_code)]
use crate::config::LLVMConfig;
use crate::trampoline::FuncTrampoline;
use crate::translator::FuncTranslator;
@@ -10,16 +6,12 @@ use wasm_common::entity::{EntityRef, PrimaryMap, SecondaryMap};
use wasm_common::Features;
use wasm_common::{FunctionIndex, FunctionType, LocalFunctionIndex, MemoryIndex, TableIndex};
use wasmer_compiler::{
Compilation, CompileError, CompiledFunction, Compiler, CompilerConfig, CustomSection,
CustomSectionProtection, FunctionBody, FunctionBodyData, ModuleTranslationState, Relocation,
RelocationTarget, SectionBody, SectionIndex, Target, TrapInformation,
Compilation, CompileError, Compiler, CompilerConfig, FunctionBody, FunctionBodyData,
ModuleTranslationState, RelocationTarget, SectionIndex, Target,
};
use wasmer_runtime::{MemoryPlan, ModuleInfo, TablePlan, TrapCode};
use wasmer_runtime::{MemoryPlan, ModuleInfo, TablePlan};
use inkwell::targets::{InitializationConfig, Target as InkwellTarget};
use std::collections::HashMap;
use std::sync::{Arc, Mutex}; // TODO: remove
//use std::sync::{Arc, Mutex};
/// A compiler that compiles a WebAssembly module with LLVM, translating the Wasm to LLVM IR,
/// optimizing it and then translating to assembly.
@@ -75,7 +67,7 @@ impl Compiler for LLVMCompiler {
.unwrap_or_else(|| format!("fn{}", func_index.index()));
}
let mut module_custom_sections = PrimaryMap::new();
let mut functions = function_body_inputs
let functions = function_body_inputs
.into_iter()
.collect::<Vec<(LocalFunctionIndex, &FunctionBodyData<'_>)>>()
.par_iter()
@@ -94,31 +86,25 @@ impl Compiler for LLVMCompiler {
})
.collect::<Result<Vec<_>, CompileError>>()?
.into_iter()
.map(|(mut compiled_function, mut function_custom_sections)| {
.map(|(mut compiled_function, function_custom_sections)| {
let first_section = module_custom_sections.len() as u32;
for (_, custom_section) in function_custom_sections.iter() {
// TODO: remove this call to clone()
let mut custom_section = custom_section.clone();
for mut reloc in &mut custom_section.relocations {
match reloc.reloc_target {
RelocationTarget::CustomSection(index) => {
reloc.reloc_target = RelocationTarget::CustomSection(
SectionIndex::from_u32(first_section + index.as_u32()),
)
}
_ => {}
if let RelocationTarget::CustomSection(index) = reloc.reloc_target {
reloc.reloc_target = RelocationTarget::CustomSection(
SectionIndex::from_u32(first_section + index.as_u32()),
)
}
}
module_custom_sections.push(custom_section);
}
for mut reloc in &mut compiled_function.relocations {
match reloc.reloc_target {
RelocationTarget::CustomSection(index) => {
reloc.reloc_target = RelocationTarget::CustomSection(
SectionIndex::from_u32(first_section + index.as_u32()),
)
}
_ => {}
if let RelocationTarget::CustomSection(index) = reloc.reloc_target {
reloc.reloc_target = RelocationTarget::CustomSection(
SectionIndex::from_u32(first_section + index.as_u32()),
)
}
}
compiled_function
@@ -142,7 +128,7 @@ impl Compiler for LLVMCompiler {
fn compile_dynamic_function_trampolines(
&self,
module: &ModuleInfo,
_module: &ModuleInfo,
) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError> {
Ok(PrimaryMap::new())
// unimplemented!("Dynamic function trampolines not yet implemented");
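The hunk above folds each function's custom sections into a module-wide PrimaryMap, rebasing every CustomSection relocation target by the count of sections already present; the one-armed match over reloc_target becomes an if let (clippy's single_match). A standalone sketch of the rebasing with plain Vec and usize in place of the wasmer_compiler types:

// Simplified stand-ins for the relocation types.
enum Target {
    CustomSection(usize), // index local to one function's section list
    Other,
}

struct Section {
    relocations: Vec<Target>,
}

// Append function_sections onto module_sections, rewriting each local
// CustomSection index into a module-wide one by adding the offset of the
// first newly appended section.
fn append_rebased(module_sections: &mut Vec<Section>, function_sections: Vec<Section>) {
    let first_section = module_sections.len();
    for mut section in function_sections {
        for reloc in &mut section.relocations {
            if let Target::CustomSection(index) = *reloc {
                *reloc = Target::CustomSection(first_section + index);
            }
        }
        module_sections.push(section);
    }
}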


@@ -1,6 +1,3 @@
// Allow unused imports while developing
#![allow(unused_imports, dead_code)]
use crate::compiler::LLVMCompiler;
use inkwell::targets::{
CodeModel, InitializationConfig, RelocMode, Target as InkwellTarget, TargetMachine,
@@ -120,36 +117,34 @@ impl LLVMConfig {
// The CPU features formatted as LLVM strings
let llvm_cpu_features = cpu_features
.iter()
.filter_map(|feature| match feature {
CpuFeature::SSE2 => Some("+sse2"),
CpuFeature::SSE3 => Some("+sse3"),
CpuFeature::SSSE3 => Some("+ssse3"),
CpuFeature::SSE41 => Some("+sse4.1"),
CpuFeature::SSE42 => Some("+sse4.2"),
CpuFeature::POPCNT => Some("+popcnt"),
CpuFeature::AVX => Some("+avx"),
CpuFeature::BMI1 => Some("+bmi"),
CpuFeature::BMI2 => Some("+bmi2"),
CpuFeature::AVX2 => Some("+avx2"),
CpuFeature::AVX512DQ => Some("+avx512dq"),
CpuFeature::AVX512VL => Some("+avx512vl"),
CpuFeature::LZCNT => Some("+lzcnt"),
.map(|feature| match feature {
CpuFeature::SSE2 => "+sse2",
CpuFeature::SSE3 => "+sse3",
CpuFeature::SSSE3 => "+ssse3",
CpuFeature::SSE41 => "+sse4.1",
CpuFeature::SSE42 => "+sse4.2",
CpuFeature::POPCNT => "+popcnt",
CpuFeature::AVX => "+avx",
CpuFeature::BMI1 => "+bmi",
CpuFeature::BMI2 => "+bmi2",
CpuFeature::AVX2 => "+avx2",
CpuFeature::AVX512DQ => "+avx512dq",
CpuFeature::AVX512VL => "+avx512vl",
CpuFeature::LZCNT => "+lzcnt",
})
.join(",");
let arch_string = triple.architecture.to_string();
let llvm_target = InkwellTarget::from_triple(&self.target_triple()).unwrap();
let target_machine = llvm_target
llvm_target
.create_target_machine(
&self.target_triple(),
"generic",
&llvm_cpu_features,
self.opt_level.clone(),
self.opt_level,
self.reloc_mode(),
self.code_model(),
)
.unwrap();
target_machine
.unwrap()
}
}
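Every arm of the match above yields a value, so filter_map(.. Some(..)) triggers clippy's unnecessary_filter_map and plain map suffices; the same hunk also drops a clone() on the Copy opt_level (clone_on_copy) and returns the target machine directly instead of binding it first (let_and_return). The filter_map change in miniature, with a hypothetical two-variant Feature enum; the crate joins the iterator directly (itertools-style), while this sketch collects first to stay in std:

#[derive(Clone, Copy)]
enum Feature {
    Sse2,
    Avx,
}

fn llvm_flags(features: &[Feature]) -> String {
    features
        .iter()
        .map(|feature| match feature {
            // Every arm produces a value, so map replaces filter_map.
            Feature::Sse2 => "+sse2",
            Feature::Avx => "+avx",
        })
        .collect::<Vec<_>>()
        .join(",")
}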


@@ -23,7 +23,7 @@ impl FuncTrampoline {
ty: &FunctionType,
config: &LLVMConfig,
) -> Result<FunctionBody, CompileError> {
let mut module = self.ctx.create_module("");
let module = self.ctx.create_module("");
let target_triple = config.target_triple();
let target_machine = config.target_machine();
module.set_triple(&target_triple);
@@ -59,13 +59,13 @@ impl FuncTrampoline {
pass_manager.add_early_cse_pass();
pass_manager.run_on(&mut module);
pass_manager.run_on(&module);
// TODO: remove debugging
//module.print_to_stderr();
let memory_buffer = target_machine
.write_to_memory_buffer(&mut module, FileType::Object)
.write_to_memory_buffer(&module, FileType::Object)
.unwrap();
/*
@@ -84,9 +84,7 @@ impl FuncTrampoline {
let mut bytes = vec![];
for section in object.get_sections() {
if section.get_name().map(std::ffi::CStr::to_bytes)
== Some("wasmer_trampoline".as_bytes())
{
if section.get_name().map(std::ffi::CStr::to_bytes) == Some(b"wasmer_trampoline") {
bytes.extend(section.get_contents().to_vec());
break;
}
@@ -119,9 +117,9 @@ fn generate_trampoline<'ctx>(
"");
*/
let (callee_vmctx_ptr, func_ptr, args_rets_ptr) = match trampoline_func.get_params().as_slice()
let (callee_vmctx_ptr, func_ptr, args_rets_ptr) = match *trampoline_func.get_params().as_slice()
{
&[callee_vmctx_ptr, func_ptr, args_rets_ptr] => (
[callee_vmctx_ptr, func_ptr, args_rets_ptr] => (
callee_vmctx_ptr,
func_ptr.into_pointer_value(),
args_rets_ptr.into_pointer_value(),
@@ -159,17 +157,17 @@ fn generate_trampoline<'ctx>(
let arg = builder.build_load(typed_item_pointer, "arg");
args_vec.push(arg);
i = i + 1;
i += 1;
if *param_ty == Type::V128 {
i = i + 1;
i += 1;
}
}
let call_site = builder.build_call(func_ptr, &args_vec, "call");
match func_sig.results() {
&[] => {}
&[one_ret] => {
match *func_sig.results() {
[] => {}
[one_ret] => {
let ret_ptr_type = cast_ptr_ty(one_ret);
let typed_ret_ptr =
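Two more mechanical rewrites appear in this file: i = i + 1 becomes i += 1 (assign_op_pattern), and matching on *slice instead of slice lets the arms shed their & sigils. The slice-pattern shape, with plain i32 values standing in for the LLVM ones:

fn describe(results: &[i32]) -> String {
    // Dereferencing in the scrutinee (match *results) means the arms
    // bind by copy and no longer need &[..] patterns.
    match *results {
        [] => "void".to_string(),
        [one] => format!("single {}", one),
        _ => "multi-value".to_string(),
    }
}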


@@ -1,59 +1,46 @@
use super::{
intrinsics::{
func_type_to_llvm, tbaa_label, type_to_llvm, type_to_llvm_ptr, CtxType, GlobalCache,
Intrinsics, MemoryCache,
func_type_to_llvm, tbaa_label, type_to_llvm, CtxType, GlobalCache, Intrinsics, MemoryCache,
},
read_info::blocktype_to_type,
// stackmap::{StackmapEntry, StackmapEntryKind, StackmapRegistry, ValueSemantic},
state::{ControlFrame, ExtraInfo, IfElseState, State},
// LLVMBackendConfig, LLVMCallbacks,
};
use inkwell::{
builder::Builder,
context::Context,
module::{Linkage, Module},
passes::PassManager,
//targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine, TargetTriple},
targets::FileType,
types::{BasicType, BasicTypeEnum, FloatMathType, IntType, PointerType, VectorType},
values::{
BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue,
VectorValue,
},
AddressSpace,
// OptimizationLevel,
AtomicOrdering,
AtomicRMWBinOp,
FloatPredicate,
IntPredicate,
AddressSpace, AtomicOrdering, AtomicRMWBinOp, FloatPredicate, IntPredicate,
};
use smallvec::SmallVec;
use std::any::Any;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::num::TryFromIntError;
use crate::config::LLVMConfig;
use wasm_common::entity::{PrimaryMap, SecondaryMap};
use wasm_common::{
FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, MemoryType,
Mutability, SignatureIndex, TableIndex, Type,
FunctionIndex, GlobalIndex, LocalFunctionIndex, MemoryIndex, SignatureIndex, TableIndex, Type,
};
use wasmer_compiler::wasmparser::{self, BinaryReader, MemoryImmediate, Operator};
use wasmer_compiler::{
to_wasm_error, wasm_unsupported, Addend, CodeOffset, CompileError, CompiledFunction,
CompiledFunctionFrameInfo, CustomSection, CustomSectionProtection, CustomSections,
FunctionAddressMap, FunctionBody, FunctionBodyData, InstructionAddressMap, Relocation,
RelocationKind, RelocationTarget, SectionBody, SectionIndex, SourceLoc, WasmResult,
to_wasm_error, wasm_unsupported, CompileError, CompiledFunction, CompiledFunctionFrameInfo,
CustomSection, CustomSectionProtection, CustomSections, FunctionAddressMap, FunctionBody,
FunctionBodyData, InstructionAddressMap, Relocation, RelocationKind, RelocationTarget,
SectionBody, SectionIndex, SourceLoc, WasmResult,
};
use wasmer_runtime::libcalls::LibCall;
use wasmer_runtime::{
MemoryPlan, MemoryStyle, ModuleInfo, TablePlan, VMBuiltinFunctionIndex, VMOffsets,
};
use wasmer_runtime::{MemoryPlan, ModuleInfo, TablePlan, VMBuiltinFunctionIndex, VMOffsets};
// TODO: debugging
use std::fs;
use std::io::Write;
//use std::fs;
//use std::io::Write;
use wasm_common::entity::entity_impl;
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
@@ -100,7 +87,7 @@ pub struct FuncTranslator {
ctx: Context,
}
fn const_zero<'ctx>(ty: BasicTypeEnum<'ctx>) -> BasicValueEnum<'ctx> {
fn const_zero(ty: BasicTypeEnum) -> BasicValueEnum {
match ty {
BasicTypeEnum::ArrayType(ty) => ty.const_zero().as_basic_value_enum(),
BasicTypeEnum::FloatType(ty) => ty.const_zero().as_basic_value_enum(),
@@ -125,7 +112,7 @@ impl FuncTranslator {
function_body: &FunctionBodyData,
config: &LLVMConfig,
memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
table_plans: &PrimaryMap<TableIndex, TablePlan>,
_table_plans: &PrimaryMap<TableIndex, TablePlan>,
func_names: &SecondaryMap<FunctionIndex, String>,
) -> Result<(CompiledFunction, CustomSections), CompileError> {
let func_index = wasm_module.func_index(*local_func_index);
@@ -134,7 +121,7 @@ impl FuncTranslator {
None => format!("<anonymous module> function {}", func_name),
Some(module_name) => format!("module {} function {}", module_name, func_name),
};
let mut module = self.ctx.create_module(module_name.as_str());
let module = self.ctx.create_module(module_name.as_str());
let target_triple = config.target_triple();
let target_machine = config.target_machine();
@@ -222,7 +209,7 @@ impl FuncTranslator {
ctx: CtxType::new(wasm_module, &func, &cache_builder),
unreachable_depth: 0,
memory_plans,
table_plans,
_table_plans,
module: &module,
// TODO: pointer width
vmoffsets: VMOffsets::new(8, &wasm_module),
@@ -233,7 +220,7 @@ impl FuncTranslator {
while fcg.state.has_control_frames() {
let pos = reader.current_position() as u32;
let op = reader.read_operator().map_err(to_wasm_error)?;
fcg.translate_operator(op, wasm_module, pos)?;
fcg.translate_operator(op, pos)?;
}
// TODO: use phf?
@@ -310,12 +297,12 @@ impl FuncTranslator {
pass_manager.add_slp_vectorize_pass();
pass_manager.add_early_cse_pass();
pass_manager.run_on(&mut module);
pass_manager.run_on(&module);
// TODO: llvm-callbacks llvm post-opt-ir
let memory_buffer = target_machine
.write_to_memory_buffer(&mut module, FileType::Object)
.write_to_memory_buffer(&module, FileType::Object)
.unwrap();
// TODO: remove debugging.
@@ -339,15 +326,7 @@ impl FuncTranslator {
if section.sh_name == goblin::elf::section_header::SHN_UNDEF as _ {
return None;
}
let name = elf.strtab.get(section.sh_name);
if name.is_none() {
return None;
}
let name = name.unwrap();
if name.is_err() {
return None;
}
Some(name.unwrap())
elf.strtab.get(section.sh_name)?.ok()
};
// Build up a mapping from a section to its relocation sections.
@@ -1128,7 +1107,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
// Replaces any NaN with the canonical QNaN, otherwise leaves the value alone.
fn canonicalize_nans(&self, value: BasicValueEnum<'ctx>) -> BasicValueEnum<'ctx> {
let f_ty = value.get_type();
let canonicalized = if f_ty.is_vector_type() {
if f_ty.is_vector_type() {
let value = value.into_vector_value();
let f_ty = f_ty.into_vector_type();
let zero = f_ty.const_zero();
@@ -1154,8 +1133,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.builder
.build_select(nan_cmp, canonical_qnan, value, "")
.as_basic_value_enum()
};
canonicalized
}
}
pub fn resolve_memory_ptr(
@@ -1170,9 +1148,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let intrinsics = &self.intrinsics;
let context = &self.context;
let module = &self.module;
let wasm_module = &self.wasm_module;
let function = &self.function;
let ctx = &mut self.ctx;
let memory_index = MemoryIndex::from_u32(0);
@@ -1191,14 +1167,11 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
current_length_ptr,
} => {
// Bounds check it.
let current_length = builder.build_load(current_length_ptr, "");
// TODO: tbaa_label
let minimum = memory_plans[memory_index].memory.minimum;
let maximum = memory_plans[memory_index].memory.maximum;
// If the memory is dynamic, do a bounds check. For static we rely on
// the size being a multiple of the page size and hitting a guard page.
let value_size_v = intrinsics.i64_ty.const_int(value_size as u64, false);
let ptr_in_bounds = if offset.is_const() {
// When the offset is constant, if it's below the minimum
// memory size, we've statically shown that it's safe.
let load_offset_end = offset.const_add(value_size_v);
let ptr_in_bounds = load_offset_end.const_int_compare(
IntPredicate::ULE,
@@ -1217,6 +1190,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let current_length =
builder.build_load(current_length_ptr, "").into_int_value();
// TODO: tbaa_label
builder.build_int_compare(
IntPredicate::ULE,
@@ -1431,7 +1405,7 @@ pub struct LLVMFunctionCodeGenerator<'ctx, 'a> {
ctx: CtxType<'ctx, 'a>,
unreachable_depth: usize,
memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>,
table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
_table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
// This is support for stackmaps:
/*
@@ -1447,12 +1421,7 @@ pub struct LLVMFunctionCodeGenerator<'ctx, 'a> {
}
impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
fn translate_operator(
&mut self,
op: Operator,
module: &ModuleInfo,
_source_loc: u32,
) -> Result<(), CompileError> {
fn translate_operator(&mut self, op: Operator, _source_loc: u32) -> Result<(), CompileError> {
// TODO: remove this vmctx by moving everything into CtxType. Values
// computed off vmctx usually benefit from caching.
let vmctx = &self.ctx.basic().into_pointer_value();
@@ -1488,12 +1457,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
* https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#control-flow-instructions
***************************/
Operator::Block { ty } => {
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
let end_block = self.context.append_basic_block(self.function, "end");
self.builder.position_at_end(end_block);
@@ -1567,12 +1534,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::Br { relative_depth } => {
let frame = self.state.frame_at_depth(relative_depth)?;
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
let value_len = if frame.is_loop() {
0
@@ -1601,12 +1566,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let cond = self.state.pop1()?;
let frame = self.state.frame_at_depth(relative_depth)?;
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
let value_len = if frame.is_loop() {
0
@@ -1636,12 +1599,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.builder.position_at_end(else_block);
}
Operator::BrTable { ref table } => {
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
let (label_depths, default_depth) = table.read_table().map_err(to_wasm_error)?;
@@ -1692,12 +1653,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.state.reachable = false;
}
Operator::If { ty } => {
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
let if_then_block = self.context.append_basic_block(self.function, "if_then");
let if_else_block = self.context.append_basic_block(self.function, "if_else");
let end_block = self.context.append_basic_block(self.function, "if_end");
@@ -1737,12 +1696,9 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::Else => {
if self.state.reachable {
let frame = self.state.frame_at_depth(0)?;
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self.builder.get_insert_block().ok_or_else(|| {
CompileError::Codegen("not currently in a block".to_string())
})?;
for phi in frame.phis().to_vec().iter().rev() {
let (value, info) = self.state.pop1_extra()?;
@@ -1772,12 +1728,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::End => {
let frame = self.state.pop_frame()?;
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
if self.state.reachable {
for phi in frame.phis().iter().rev() {
@@ -1832,12 +1786,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
}
}
Operator::Return => {
let current_block =
self.builder
.get_insert_block()
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
let current_block = self
.builder
.get_insert_block()
.ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
let frame = self.state.outermost_frame()?;
for phi in frame.phis().to_vec().iter() {
@@ -2085,7 +2037,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::GlobalSet { global_index } => {
let global_index = GlobalIndex::from_u32(global_index);
match self.ctx.global(global_index, self.intrinsics) {
GlobalCache::Const { value } => {
GlobalCache::Const { value: _ } => {
return Err(CompileError::Codegen(format!(
"global.set on immutable global index {}",
global_index.as_u32()
@@ -2144,23 +2096,22 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
}
Operator::Call { function_index } => {
let func_index = FunctionIndex::from_u32(function_index);
let sigindex = &module.functions[func_index];
let func_type = &module.signatures[*sigindex];
let sigindex = &self.wasm_module.functions[func_index];
let func_type = &self.wasm_module.signatures[*sigindex];
let func_name = &self.func_names[func_index];
let llvm_func_type = func_type_to_llvm(&self.context, &self.intrinsics, func_type);
let (func, callee_vmctx) = if let Some(local_func_index) =
module.local_func_index(func_index)
let (func, callee_vmctx) = if self
.wasm_module
.local_func_index(func_index)
.is_some()
{
// TODO: we could do this by comparing self.function indices instead
// of going through LLVM APIs and string comparisons.
let func = self.module.get_function(func_name);
let func = if func.is_none() {
let func = self.module.get_function(func_name).unwrap_or_else(|| {
self.module
.add_function(func_name, llvm_func_type, Some(Linkage::External))
} else {
func.unwrap()
};
});
(func.as_global_value().as_pointer_value(), self.ctx.basic())
} else {
let offset = self.vmoffsets.vmctx_vmfunction_import(func_index);
@@ -2269,7 +2220,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
if let Some(basic_value) = call_site.try_as_basic_value().left() {
match func_type.results().len() {
1 => self.state.push1(basic_value),
count @ _ => {
count => {
// This is a multi-value return.
let struct_value = basic_value.into_struct_value();
for i in 0..(count as u32) {
@@ -2285,14 +2236,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
}
Operator::CallIndirect { index, table_index } => {
let sigindex = SignatureIndex::from_u32(index);
let func_type = &module.signatures[sigindex];
let func_type = &self.wasm_module.signatures[sigindex];
let expected_dynamic_sigindex =
self.ctx.dynamic_sigindex(sigindex, self.intrinsics);
let (table_base, table_bound) = self.ctx.table(
TableIndex::from_u32(table_index),
self.intrinsics,
self.module,
&self.builder,
);
let func_index = self.state.pop1()?.into_int_value();
@@ -2312,32 +2262,30 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
};
// Load things from the anyfunc data structure.
let (func_ptr, found_dynamic_sigindex, ctx_ptr) = unsafe {
(
self.builder
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 0, "func_ptr_ptr")
.unwrap(),
"func_ptr",
)
.into_pointer_value(),
self.builder
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 1, "sigindex_ptr")
.unwrap(),
"sigindex",
)
.into_int_value(),
self.builder.build_load(
let (func_ptr, found_dynamic_sigindex, ctx_ptr) = (
self.builder
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 2, "ctx_ptr_ptr")
.build_struct_gep(anyfunc_struct_ptr, 0, "func_ptr_ptr")
.unwrap(),
"ctx_ptr",
),
)
};
"func_ptr",
)
.into_pointer_value(),
self.builder
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 1, "sigindex_ptr")
.unwrap(),
"sigindex",
)
.into_int_value(),
self.builder.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 2, "ctx_ptr_ptr")
.unwrap(),
"ctx_ptr",
),
);
let truncated_table_bounds = self.builder.build_int_truncate(
table_bound,
@@ -8443,7 +8391,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let mem_index = MemoryIndex::from_u32(reserved);
let delta = self.state.pop1()?;
let (grow_fn, grow_fn_ty) =
if let Some(local_mem_index) = module.local_memory_index(mem_index) {
if self.wasm_module.local_memory_index(mem_index).is_some() {
(
VMBuiltinFunctionIndex::get_memory32_grow_index(),
self.intrinsics.memory32_grow_ptr_ty,
@@ -8487,7 +8435,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::MemorySize { reserved } => {
let mem_index = MemoryIndex::from_u32(reserved);
let (size_fn, size_fn_ty) =
if let Some(local_mem_index) = module.local_memory_index(mem_index) {
if self.wasm_module.local_memory_index(mem_index).is_some() {
(
VMBuiltinFunctionIndex::get_memory32_size_index(),
self.intrinsics.memory32_size_ptr_ty,
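The strtab hunk earlier in this file is the densest rewrite: four lines of is_none/unwrap checks collapse into elf.strtab.get(section.sh_name)?.ok(), because inside a closure returning Option the ? operator propagates None and .ok() folds the inner Result into the Option. The same shape with a toy lookup returning Option<Result<..>>:

// Stand-in for goblin's strtab lookup: an Option of a Result.
fn lookup(names: &[Result<&'static str, ()>], i: usize) -> Option<Result<&'static str, ()>> {
    names.get(i).copied()
}

fn section_name(names: &[Result<&'static str, ()>], i: usize) -> Option<&'static str> {
    // ? exits with None if the lookup misses; .ok() drops the error case.
    lookup(names, i)?.ok()
}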


@@ -22,8 +22,8 @@ use inkwell::{
use std::collections::HashMap;
use wasm_common::entity::{EntityRef, PrimaryMap};
use wasm_common::{
FunctionIndex, FunctionType as FuncType, GlobalIndex, MemoryIndex, Mutability, Pages,
SignatureIndex, TableIndex, Type,
FunctionType as FuncType, GlobalIndex, MemoryIndex, Mutability, SignatureIndex, TableIndex,
Type,
};
use wasmer_runtime::ModuleInfo as WasmerCompilerModule;
use wasmer_runtime::{MemoryPlan, MemoryStyle, TrapCode, VMOffsets};
@@ -230,25 +230,7 @@ impl<'ctx> Intrinsics<'ctx> {
let ctx_ty = i8_ty;
let ctx_ptr_ty = ctx_ty.ptr_type(AddressSpace::Generic);
let local_memory_ty =
context.struct_type(&[i8_ptr_ty_basic, i64_ty_basic, i8_ptr_ty_basic], false);
let local_table_ty = local_memory_ty;
let local_global_ty = i64_ty;
let func_ctx_ty =
context.struct_type(&[ctx_ptr_ty.as_basic_type_enum(), i8_ptr_ty_basic], false);
let func_ctx_ptr_ty = func_ctx_ty.ptr_type(AddressSpace::Generic);
let imported_func_ty = context.struct_type(
&[i8_ptr_ty_basic, func_ctx_ptr_ty.as_basic_type_enum()],
false,
);
let sigindex_ty = i32_ty;
let rt_intrinsics_ty = i8_ty;
let stack_lower_bound_ty = i8_ty;
let memory_base_ty = i8_ty;
let memory_bound_ty = i8_ty;
let internals_ty = i64_ty;
let interrupt_signal_mem_ty = i8_ty;
let local_function_ty = i8_ptr_ty;
let anyfunc_ty = context.struct_type(
&[
@@ -613,24 +595,16 @@ pub enum GlobalCache<'ctx> {
Const { value: BasicValueEnum<'ctx> },
}
struct ImportedFuncCache<'ctx> {
func_ptr: PointerValue<'ctx>,
ctx_ptr: PointerValue<'ctx>,
}
pub struct CtxType<'ctx, 'a> {
ctx_ptr_value: PointerValue<'ctx>,
wasm_module: &'a WasmerCompilerModule,
cache_builder: &'a Builder<'ctx>,
cached_signal_mem: Option<PointerValue<'ctx>>,
cached_memories: HashMap<MemoryIndex, MemoryCache<'ctx>>,
cached_tables: HashMap<TableIndex, TableCache<'ctx>>,
cached_sigindices: HashMap<SignatureIndex, IntValue<'ctx>>,
cached_globals: HashMap<GlobalIndex, GlobalCache<'ctx>>,
cached_imported_functions: HashMap<FunctionIndex, ImportedFuncCache<'ctx>>,
offsets: VMOffsets,
}
@@ -647,13 +621,10 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
wasm_module,
cache_builder,
cached_signal_mem: None,
cached_memories: HashMap::new(),
cached_tables: HashMap::new(),
cached_sigindices: HashMap::new(),
cached_globals: HashMap::new(),
cached_imported_functions: HashMap::new(),
// TODO: pointer width
offsets: VMOffsets::new(8, &wasm_module),
@@ -668,7 +639,7 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
&mut self,
index: MemoryIndex,
intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>,
_module: &Module<'ctx>,
memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
) -> MemoryCache<'ctx> {
let (cached_memories, wasm_module, ctx_ptr_value, cache_builder, offsets) = (
@@ -740,7 +711,7 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
&mut self,
table_index: TableIndex,
intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>,
_module: &Module<'ctx>,
) -> (PointerValue<'ctx>, PointerValue<'ctx>) {
let (cached_tables, wasm_module, ctx_ptr_value, cache_builder, offsets) = (
&mut self.cached_tables,
@@ -837,7 +808,6 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
index: TableIndex,
intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>,
builder: &Builder<'ctx>,
) -> (PointerValue<'ctx>, IntValue<'ctx>) {
let (ptr_to_base_ptr, ptr_to_bounds) = self.table_prepare(index, intrinsics, module);
let base_ptr = self
@@ -1048,7 +1018,7 @@ pub fn func_type_to_llvm<'ctx>(
match fntype.results() {
&[] => intrinsics.void_ty.fn_type(&param_types, false),
&[single_value] => type_to_llvm(intrinsics, single_value).fn_type(&param_types, false),
returns @ _ => {
returns => {
let basic_types: Vec<_> = returns
.iter()
.map(|&ty| type_to_llvm(intrinsics, ty))
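The final hunk drops a redundant @ _ binding (clippy's redundant_pattern): a bare name in a pattern already matches anything, so returns @ _ says nothing that returns alone does not. In miniature:

fn arity(results: &[i32]) -> usize {
    match results {
        [] => 0,
        [_one] => 1,
        // rest @ _ would be redundant; a bare binding already matches anything.
        rest => rest.len(),
    }
}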


@@ -14,22 +14,18 @@ fn wp_type_to_type(ty: WpType) -> Result<Type, CompileError> {
WpType::F32 => Ok(Type::F32),
WpType::F64 => Ok(Type::F64),
WpType::V128 => Ok(Type::V128),
_ => {
return Err(CompileError::Codegen(
"broken invariant, invalid type".to_string(),
));
}
_ => Err(CompileError::Codegen(
"broken invariant, invalid type".to_string(),
)),
}
}
pub fn blocktype_to_type(ty: WpTypeOrFuncType) -> Result<Type, CompileError> {
match ty {
WpTypeOrFuncType::Type(inner_ty) => Ok(wp_type_to_type(inner_ty)?),
_ => {
return Err(CompileError::Codegen(
"the wasmer llvm backend does not yet support the multi-value return extension"
.to_string(),
));
}
_ => Err(CompileError::Codegen(
"the wasmer llvm backend does not yet support the multi-value return extension"
.to_string(),
)),
}
}
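Both functions above now yield the Err value as the match arm's expression instead of an explicit return inside a block (needless_return). The shape, with a cut-down WpType:

enum WpType {
    I32,
    F32,
    Unknown,
}

fn to_name(ty: WpType) -> Result<&'static str, String> {
    match ty {
        WpType::I32 => Ok("i32"),
        WpType::F32 => Ok("f32"),
        // The arm is already the function's tail expression; return adds nothing.
        _ => Err("broken invariant, invalid type".to_string()),
    }
}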


@@ -181,13 +181,12 @@ impl BitAnd for ExtraInfo {
(false, true) => ExtraInfo::arithmetic_f64(),
(true, true) => ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(),
};
let info = match (self.has_pending_f32_nan(), self.has_pending_f64_nan()) {
match (self.has_pending_f32_nan(), self.has_pending_f64_nan()) {
(false, false) => info,
(true, false) => info | ExtraInfo::pending_f32_nan(),
(false, true) => info | ExtraInfo::pending_f64_nan(),
(true, true) => unreachable!("Can't form ExtraInfo with two pending canonicalizations"),
};
info
}
}
}
@@ -209,7 +208,7 @@ impl<'ctx> State<'ctx> {
}
pub fn has_control_frames(&self) -> bool {
return !self.control_stack.is_empty();
!self.control_stack.is_empty()
}
pub fn reset_stack(&mut self, frame: &ControlFrame<'ctx>) {
@@ -231,9 +230,9 @@ impl<'ctx> State<'ctx> {
}
pub fn outermost_frame(&self) -> Result<&ControlFrame<'ctx>, CompileError> {
self.control_stack.get(0).ok_or(CompileError::Codegen(
"outermost_frame: invalid control stack depth".to_string(),
))
self.control_stack.get(0).ok_or_else(|| {
CompileError::Codegen("outermost_frame: invalid control stack depth".to_string())
})
}
pub fn frame_at_depth(&self, depth: u32) -> Result<&ControlFrame<'ctx>, CompileError> {
@@ -241,9 +240,9 @@ impl<'ctx> State<'ctx> {
.control_stack
.len()
.checked_sub(1 + (depth as usize))
.ok_or(CompileError::Codegen(
"frame_at_depth: invalid control stack depth".to_string(),
))?;
.ok_or_else(|| {
CompileError::Codegen("frame_at_depth: invalid control stack depth".to_string())
})?;
Ok(&self.control_stack[index])
}
@@ -255,16 +254,16 @@ impl<'ctx> State<'ctx> {
.control_stack
.len()
.checked_sub(1 + (depth as usize))
.ok_or(CompileError::Codegen(
"frame_at_depth_mut: invalid control stack depth".to_string(),
))?;
.ok_or_else(|| {
CompileError::Codegen("frame_at_depth_mut: invalid control stack depth".to_string())
})?;
Ok(&mut self.control_stack[index])
}
pub fn pop_frame(&mut self) -> Result<ControlFrame<'ctx>, CompileError> {
self.control_stack.pop().ok_or(CompileError::Codegen(
"pop_frame: cannot pop from control stack".to_string(),
))
self.control_stack.pop().ok_or_else(|| {
CompileError::Codegen("pop_frame: cannot pop from control stack".to_string())
})
}
pub fn push1<T: BasicValue<'ctx>>(&mut self, value: T) {
@@ -280,9 +279,9 @@ impl<'ctx> State<'ctx> {
}
pub fn pop1_extra(&mut self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> {
self.stack.pop().ok_or(CompileError::Codegen(
"pop1_extra: invalid value stack".to_string(),
))
self.stack
.pop()
.ok_or_else(|| CompileError::Codegen("pop1_extra: invalid value stack".to_string()))
}
pub fn pop2(&mut self) -> Result<(BasicValueEnum<'ctx>, BasicValueEnum<'ctx>), CompileError> {
@@ -322,13 +321,10 @@ impl<'ctx> State<'ctx> {
}
pub fn peek1_extra(&self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> {
let index = self
.stack
.len()
.checked_sub(1)
.ok_or(CompileError::Codegen(
"peek1_extra: invalid value stack".to_string(),
))?;
let index =
self.stack.len().checked_sub(1).ok_or_else(|| {
CompileError::Codegen("peek1_extra: invalid value stack".to_string())
})?;
Ok(self.stack[index])
}
@@ -340,13 +336,10 @@ impl<'ctx> State<'ctx> {
&self,
n: usize,
) -> Result<&[(BasicValueEnum<'ctx>, ExtraInfo)], CompileError> {
let index = self
.stack
.len()
.checked_sub(n)
.ok_or(CompileError::Codegen(
"peekn_extra: invalid value stack".to_string(),
))?;
let index =
self.stack.len().checked_sub(n).ok_or_else(|| {
CompileError::Codegen("peekn_extra: invalid value stack".to_string())
})?;
Ok(&self.stack[index..])
}
@@ -364,9 +357,7 @@ impl<'ctx> State<'ctx> {
.stack
.len()
.checked_sub(n)
.ok_or(CompileError::Codegen(
"popn: invalid value stack".to_string(),
))?;
.ok_or_else(|| CompileError::Codegen("popn: invalid value stack".to_string()))?;
self.stack.truncate(index);
Ok(())
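The BitAnd change at the top of this file is clippy's let_and_return: binding the match result to info only to return it on the next line is replaced by making the match the block's tail expression. Minimal form:

fn classify(n: i32) -> &'static str {
    // Previously: let s = match .. ; s — the binding added nothing.
    match n {
        0 => "zero",
        n if n < 0 => "negative",
        _ => "positive",
    }
}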