Remove #![allow(unused_imports, dead_code)] and fix resulting issues.

Fix nearly all issues found by clippy, too.
Nick Lewycky
2020-05-20 17:16:56 -07:00
parent df2a45899a
commit a47a3068f4
7 changed files with 173 additions and 289 deletions
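
Most of the clippy churn in the hunks below falls into two patterns: error constructors moved out of ok_or(...) into ok_or_else(...) closures so the error value is only built on the failure path (clippy's or_fun_call lint), and parameters that became dead once the blanket #![allow(unused_imports, dead_code)] was removed, now kept with a leading underscore. A minimal sketch of both patterns, using simplified stand-in types and signatures rather than the real wasmer_compiler ones:

// Hypothetical stand-in for the wasmer_compiler error type used in the diff.
#[derive(Debug)]
enum CompileError {
    Codegen(String),
}

// clippy::or_fun_call: ok_or builds its error eagerly on every call, while
// ok_or_else only runs the closure on the None path, so the String is
// allocated only when the lookup actually fails.
fn frame_at_depth(control_stack: &[u32], depth: usize) -> Result<u32, CompileError> {
    let index = control_stack
        .len()
        .checked_sub(1 + depth)
        .ok_or_else(|| {
            CompileError::Codegen("frame_at_depth: invalid control stack depth".to_string())
        })?;
    Ok(control_stack[index])
}

// With dead_code/unused warnings live again, parameters kept for API symmetry
// but not read get a leading underscore (like _module and _table_plans below)
// instead of a crate-wide allow. Signature simplified for illustration.
fn compile_dynamic_function_trampolines(_module: &str) -> Vec<u8> {
    Vec::new()
}

fn main() {
    assert_eq!(frame_at_depth(&[10, 20, 30], 0).unwrap(), 30);
    assert!(matches!(
        frame_at_depth(&[], 0),
        Err(CompileError::Codegen(_))
    ));
    assert!(compile_dynamic_function_trampolines("example").is_empty());
}

The deferred-closure form is what helpers such as frame_at_depth, pop_frame, and peek1_extra converge on in the state-handling hunks below.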

View File

@@ -1,7 +1,3 @@
//! Support for compiling with LLVM.
// Allow unused imports while developing.
#![allow(unused_imports, dead_code)]
use crate::config::LLVMConfig; use crate::config::LLVMConfig;
use crate::trampoline::FuncTrampoline; use crate::trampoline::FuncTrampoline;
use crate::translator::FuncTranslator; use crate::translator::FuncTranslator;
@@ -10,16 +6,12 @@ use wasm_common::entity::{EntityRef, PrimaryMap, SecondaryMap};
use wasm_common::Features; use wasm_common::Features;
use wasm_common::{FunctionIndex, FunctionType, LocalFunctionIndex, MemoryIndex, TableIndex}; use wasm_common::{FunctionIndex, FunctionType, LocalFunctionIndex, MemoryIndex, TableIndex};
use wasmer_compiler::{ use wasmer_compiler::{
Compilation, CompileError, CompiledFunction, Compiler, CompilerConfig, CustomSection, Compilation, CompileError, Compiler, CompilerConfig, FunctionBody, FunctionBodyData,
CustomSectionProtection, FunctionBody, FunctionBodyData, ModuleTranslationState, Relocation, ModuleTranslationState, RelocationTarget, SectionIndex, Target,
RelocationTarget, SectionBody, SectionIndex, Target, TrapInformation,
}; };
use wasmer_runtime::{MemoryPlan, ModuleInfo, TablePlan, TrapCode}; use wasmer_runtime::{MemoryPlan, ModuleInfo, TablePlan};
use inkwell::targets::{InitializationConfig, Target as InkwellTarget}; //use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use std::sync::{Arc, Mutex}; // TODO: remove
/// A compiler that compiles a WebAssembly module with LLVM, translating the Wasm to LLVM IR, /// A compiler that compiles a WebAssembly module with LLVM, translating the Wasm to LLVM IR,
/// optimizing it and then translating to assembly. /// optimizing it and then translating to assembly.
@@ -75,7 +67,7 @@ impl Compiler for LLVMCompiler {
.unwrap_or_else(|| format!("fn{}", func_index.index())); .unwrap_or_else(|| format!("fn{}", func_index.index()));
} }
let mut module_custom_sections = PrimaryMap::new(); let mut module_custom_sections = PrimaryMap::new();
let mut functions = function_body_inputs let functions = function_body_inputs
.into_iter() .into_iter()
.collect::<Vec<(LocalFunctionIndex, &FunctionBodyData<'_>)>>() .collect::<Vec<(LocalFunctionIndex, &FunctionBodyData<'_>)>>()
.par_iter() .par_iter()
@@ -94,31 +86,25 @@ impl Compiler for LLVMCompiler {
}) })
.collect::<Result<Vec<_>, CompileError>>()? .collect::<Result<Vec<_>, CompileError>>()?
.into_iter() .into_iter()
.map(|(mut compiled_function, mut function_custom_sections)| { .map(|(mut compiled_function, function_custom_sections)| {
let first_section = module_custom_sections.len() as u32; let first_section = module_custom_sections.len() as u32;
for (_, custom_section) in function_custom_sections.iter() { for (_, custom_section) in function_custom_sections.iter() {
// TODO: remove this call to clone() // TODO: remove this call to clone()
let mut custom_section = custom_section.clone(); let mut custom_section = custom_section.clone();
for mut reloc in &mut custom_section.relocations { for mut reloc in &mut custom_section.relocations {
match reloc.reloc_target { if let RelocationTarget::CustomSection(index) = reloc.reloc_target {
RelocationTarget::CustomSection(index) => { reloc.reloc_target = RelocationTarget::CustomSection(
reloc.reloc_target = RelocationTarget::CustomSection( SectionIndex::from_u32(first_section + index.as_u32()),
SectionIndex::from_u32(first_section + index.as_u32()), )
)
}
_ => {}
} }
} }
module_custom_sections.push(custom_section); module_custom_sections.push(custom_section);
} }
for mut reloc in &mut compiled_function.relocations { for mut reloc in &mut compiled_function.relocations {
match reloc.reloc_target { if let RelocationTarget::CustomSection(index) = reloc.reloc_target {
RelocationTarget::CustomSection(index) => { reloc.reloc_target = RelocationTarget::CustomSection(
reloc.reloc_target = RelocationTarget::CustomSection( SectionIndex::from_u32(first_section + index.as_u32()),
SectionIndex::from_u32(first_section + index.as_u32()), )
)
}
_ => {}
} }
} }
compiled_function compiled_function
@@ -142,7 +128,7 @@ impl Compiler for LLVMCompiler {
fn compile_dynamic_function_trampolines( fn compile_dynamic_function_trampolines(
&self, &self,
module: &ModuleInfo, _module: &ModuleInfo,
) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError> { ) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError> {
Ok(PrimaryMap::new()) Ok(PrimaryMap::new())
// unimplemented!("Dynamic function trampolines not yet implemented"); // unimplemented!("Dynamic function trampolines not yet implemented");

View File

@@ -1,6 +1,3 @@
// Allow unused imports while developing
#![allow(unused_imports, dead_code)]
use crate::compiler::LLVMCompiler; use crate::compiler::LLVMCompiler;
use inkwell::targets::{ use inkwell::targets::{
CodeModel, InitializationConfig, RelocMode, Target as InkwellTarget, TargetMachine, CodeModel, InitializationConfig, RelocMode, Target as InkwellTarget, TargetMachine,
@@ -120,36 +117,34 @@ impl LLVMConfig {
// The CPU features formatted as LLVM strings // The CPU features formatted as LLVM strings
let llvm_cpu_features = cpu_features let llvm_cpu_features = cpu_features
.iter() .iter()
.filter_map(|feature| match feature { .map(|feature| match feature {
CpuFeature::SSE2 => Some("+sse2"), CpuFeature::SSE2 => "+sse2",
CpuFeature::SSE3 => Some("+sse3"), CpuFeature::SSE3 => "+sse3",
CpuFeature::SSSE3 => Some("+ssse3"), CpuFeature::SSSE3 => "+ssse3",
CpuFeature::SSE41 => Some("+sse4.1"), CpuFeature::SSE41 => "+sse4.1",
CpuFeature::SSE42 => Some("+sse4.2"), CpuFeature::SSE42 => "+sse4.2",
CpuFeature::POPCNT => Some("+popcnt"), CpuFeature::POPCNT => "+popcnt",
CpuFeature::AVX => Some("+avx"), CpuFeature::AVX => "+avx",
CpuFeature::BMI1 => Some("+bmi"), CpuFeature::BMI1 => "+bmi",
CpuFeature::BMI2 => Some("+bmi2"), CpuFeature::BMI2 => "+bmi2",
CpuFeature::AVX2 => Some("+avx2"), CpuFeature::AVX2 => "+avx2",
CpuFeature::AVX512DQ => Some("+avx512dq"), CpuFeature::AVX512DQ => "+avx512dq",
CpuFeature::AVX512VL => Some("+avx512vl"), CpuFeature::AVX512VL => "+avx512vl",
CpuFeature::LZCNT => Some("+lzcnt"), CpuFeature::LZCNT => "+lzcnt",
}) })
.join(","); .join(",");
let arch_string = triple.architecture.to_string();
let llvm_target = InkwellTarget::from_triple(&self.target_triple()).unwrap(); let llvm_target = InkwellTarget::from_triple(&self.target_triple()).unwrap();
let target_machine = llvm_target llvm_target
.create_target_machine( .create_target_machine(
&self.target_triple(), &self.target_triple(),
"generic", "generic",
&llvm_cpu_features, &llvm_cpu_features,
self.opt_level.clone(), self.opt_level,
self.reloc_mode(), self.reloc_mode(),
self.code_model(), self.code_model(),
) )
.unwrap(); .unwrap()
target_machine
} }
} }

View File

@@ -23,7 +23,7 @@ impl FuncTrampoline {
ty: &FunctionType, ty: &FunctionType,
config: &LLVMConfig, config: &LLVMConfig,
) -> Result<FunctionBody, CompileError> { ) -> Result<FunctionBody, CompileError> {
let mut module = self.ctx.create_module(""); let module = self.ctx.create_module("");
let target_triple = config.target_triple(); let target_triple = config.target_triple();
let target_machine = config.target_machine(); let target_machine = config.target_machine();
module.set_triple(&target_triple); module.set_triple(&target_triple);
@@ -59,13 +59,13 @@ impl FuncTrampoline {
pass_manager.add_early_cse_pass(); pass_manager.add_early_cse_pass();
pass_manager.run_on(&mut module); pass_manager.run_on(&module);
// TODO: remove debugging // TODO: remove debugging
//module.print_to_stderr(); //module.print_to_stderr();
let memory_buffer = target_machine let memory_buffer = target_machine
.write_to_memory_buffer(&mut module, FileType::Object) .write_to_memory_buffer(&module, FileType::Object)
.unwrap(); .unwrap();
/* /*
@@ -84,9 +84,7 @@ impl FuncTrampoline {
let mut bytes = vec![]; let mut bytes = vec![];
for section in object.get_sections() { for section in object.get_sections() {
if section.get_name().map(std::ffi::CStr::to_bytes) if section.get_name().map(std::ffi::CStr::to_bytes) == Some(b"wasmer_trampoline") {
== Some("wasmer_trampoline".as_bytes())
{
bytes.extend(section.get_contents().to_vec()); bytes.extend(section.get_contents().to_vec());
break; break;
} }
@@ -119,9 +117,9 @@ fn generate_trampoline<'ctx>(
""); "");
*/ */
let (callee_vmctx_ptr, func_ptr, args_rets_ptr) = match trampoline_func.get_params().as_slice() let (callee_vmctx_ptr, func_ptr, args_rets_ptr) = match *trampoline_func.get_params().as_slice()
{ {
&[callee_vmctx_ptr, func_ptr, args_rets_ptr] => ( [callee_vmctx_ptr, func_ptr, args_rets_ptr] => (
callee_vmctx_ptr, callee_vmctx_ptr,
func_ptr.into_pointer_value(), func_ptr.into_pointer_value(),
args_rets_ptr.into_pointer_value(), args_rets_ptr.into_pointer_value(),
@@ -159,17 +157,17 @@ fn generate_trampoline<'ctx>(
let arg = builder.build_load(typed_item_pointer, "arg"); let arg = builder.build_load(typed_item_pointer, "arg");
args_vec.push(arg); args_vec.push(arg);
i = i + 1; i += 1;
if *param_ty == Type::V128 { if *param_ty == Type::V128 {
i = i + 1; i += 1;
} }
} }
let call_site = builder.build_call(func_ptr, &args_vec, "call"); let call_site = builder.build_call(func_ptr, &args_vec, "call");
match func_sig.results() { match *func_sig.results() {
&[] => {} [] => {}
&[one_ret] => { [one_ret] => {
let ret_ptr_type = cast_ptr_ty(one_ret); let ret_ptr_type = cast_ptr_ty(one_ret);
let typed_ret_ptr = let typed_ret_ptr =

View File

@@ -1,59 +1,46 @@
use super::{ use super::{
intrinsics::{ intrinsics::{
func_type_to_llvm, tbaa_label, type_to_llvm, type_to_llvm_ptr, CtxType, GlobalCache, func_type_to_llvm, tbaa_label, type_to_llvm, CtxType, GlobalCache, Intrinsics, MemoryCache,
Intrinsics, MemoryCache,
}, },
read_info::blocktype_to_type, read_info::blocktype_to_type,
// stackmap::{StackmapEntry, StackmapEntryKind, StackmapRegistry, ValueSemantic}, // stackmap::{StackmapEntry, StackmapEntryKind, StackmapRegistry, ValueSemantic},
state::{ControlFrame, ExtraInfo, IfElseState, State}, state::{ControlFrame, ExtraInfo, IfElseState, State},
// LLVMBackendConfig, LLVMCallbacks,
}; };
use inkwell::{ use inkwell::{
builder::Builder, builder::Builder,
context::Context, context::Context,
module::{Linkage, Module}, module::{Linkage, Module},
passes::PassManager, passes::PassManager,
//targets::{CodeModel, InitializationConfig, RelocMode, Target, TargetMachine, TargetTriple},
targets::FileType, targets::FileType,
types::{BasicType, BasicTypeEnum, FloatMathType, IntType, PointerType, VectorType}, types::{BasicType, BasicTypeEnum, FloatMathType, IntType, PointerType, VectorType},
values::{ values::{
BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue, BasicValue, BasicValueEnum, FloatValue, FunctionValue, IntValue, PhiValue, PointerValue,
VectorValue, VectorValue,
}, },
AddressSpace, AddressSpace, AtomicOrdering, AtomicRMWBinOp, FloatPredicate, IntPredicate,
// OptimizationLevel,
AtomicOrdering,
AtomicRMWBinOp,
FloatPredicate,
IntPredicate,
}; };
use smallvec::SmallVec; use smallvec::SmallVec;
use std::any::Any;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::convert::TryFrom; use std::convert::TryFrom;
use std::num::TryFromIntError;
use crate::config::LLVMConfig; use crate::config::LLVMConfig;
use wasm_common::entity::{PrimaryMap, SecondaryMap}; use wasm_common::entity::{PrimaryMap, SecondaryMap};
use wasm_common::{ use wasm_common::{
FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, MemoryType, FunctionIndex, GlobalIndex, LocalFunctionIndex, MemoryIndex, SignatureIndex, TableIndex, Type,
Mutability, SignatureIndex, TableIndex, Type,
}; };
use wasmer_compiler::wasmparser::{self, BinaryReader, MemoryImmediate, Operator}; use wasmer_compiler::wasmparser::{self, BinaryReader, MemoryImmediate, Operator};
use wasmer_compiler::{ use wasmer_compiler::{
to_wasm_error, wasm_unsupported, Addend, CodeOffset, CompileError, CompiledFunction, to_wasm_error, wasm_unsupported, CompileError, CompiledFunction, CompiledFunctionFrameInfo,
CompiledFunctionFrameInfo, CustomSection, CustomSectionProtection, CustomSections, CustomSection, CustomSectionProtection, CustomSections, FunctionAddressMap, FunctionBody,
FunctionAddressMap, FunctionBody, FunctionBodyData, InstructionAddressMap, Relocation, FunctionBodyData, InstructionAddressMap, Relocation, RelocationKind, RelocationTarget,
RelocationKind, RelocationTarget, SectionBody, SectionIndex, SourceLoc, WasmResult, SectionBody, SectionIndex, SourceLoc, WasmResult,
}; };
use wasmer_runtime::libcalls::LibCall; use wasmer_runtime::libcalls::LibCall;
use wasmer_runtime::{ use wasmer_runtime::{MemoryPlan, ModuleInfo, TablePlan, VMBuiltinFunctionIndex, VMOffsets};
MemoryPlan, MemoryStyle, ModuleInfo, TablePlan, VMBuiltinFunctionIndex, VMOffsets,
};
// TODO: debugging // TODO: debugging
use std::fs; //use std::fs;
use std::io::Write; //use std::io::Write;
use wasm_common::entity::entity_impl; use wasm_common::entity::entity_impl;
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)] #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
@@ -100,7 +87,7 @@ pub struct FuncTranslator {
ctx: Context, ctx: Context,
} }
fn const_zero<'ctx>(ty: BasicTypeEnum<'ctx>) -> BasicValueEnum<'ctx> { fn const_zero(ty: BasicTypeEnum) -> BasicValueEnum {
match ty { match ty {
BasicTypeEnum::ArrayType(ty) => ty.const_zero().as_basic_value_enum(), BasicTypeEnum::ArrayType(ty) => ty.const_zero().as_basic_value_enum(),
BasicTypeEnum::FloatType(ty) => ty.const_zero().as_basic_value_enum(), BasicTypeEnum::FloatType(ty) => ty.const_zero().as_basic_value_enum(),
@@ -125,7 +112,7 @@ impl FuncTranslator {
function_body: &FunctionBodyData, function_body: &FunctionBodyData,
config: &LLVMConfig, config: &LLVMConfig,
memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>, memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
table_plans: &PrimaryMap<TableIndex, TablePlan>, _table_plans: &PrimaryMap<TableIndex, TablePlan>,
func_names: &SecondaryMap<FunctionIndex, String>, func_names: &SecondaryMap<FunctionIndex, String>,
) -> Result<(CompiledFunction, CustomSections), CompileError> { ) -> Result<(CompiledFunction, CustomSections), CompileError> {
let func_index = wasm_module.func_index(*local_func_index); let func_index = wasm_module.func_index(*local_func_index);
@@ -134,7 +121,7 @@ impl FuncTranslator {
None => format!("<anonymous module> function {}", func_name), None => format!("<anonymous module> function {}", func_name),
Some(module_name) => format!("module {} function {}", module_name, func_name), Some(module_name) => format!("module {} function {}", module_name, func_name),
}; };
let mut module = self.ctx.create_module(module_name.as_str()); let module = self.ctx.create_module(module_name.as_str());
let target_triple = config.target_triple(); let target_triple = config.target_triple();
let target_machine = config.target_machine(); let target_machine = config.target_machine();
@@ -222,7 +209,7 @@ impl FuncTranslator {
ctx: CtxType::new(wasm_module, &func, &cache_builder), ctx: CtxType::new(wasm_module, &func, &cache_builder),
unreachable_depth: 0, unreachable_depth: 0,
memory_plans, memory_plans,
table_plans, _table_plans,
module: &module, module: &module,
// TODO: pointer width // TODO: pointer width
vmoffsets: VMOffsets::new(8, &wasm_module), vmoffsets: VMOffsets::new(8, &wasm_module),
@@ -233,7 +220,7 @@ impl FuncTranslator {
while fcg.state.has_control_frames() { while fcg.state.has_control_frames() {
let pos = reader.current_position() as u32; let pos = reader.current_position() as u32;
let op = reader.read_operator().map_err(to_wasm_error)?; let op = reader.read_operator().map_err(to_wasm_error)?;
fcg.translate_operator(op, wasm_module, pos)?; fcg.translate_operator(op, pos)?;
} }
// TODO: use phf? // TODO: use phf?
@@ -310,12 +297,12 @@ impl FuncTranslator {
pass_manager.add_slp_vectorize_pass(); pass_manager.add_slp_vectorize_pass();
pass_manager.add_early_cse_pass(); pass_manager.add_early_cse_pass();
pass_manager.run_on(&mut module); pass_manager.run_on(&module);
// TODO: llvm-callbacks llvm post-opt-ir // TODO: llvm-callbacks llvm post-opt-ir
let memory_buffer = target_machine let memory_buffer = target_machine
.write_to_memory_buffer(&mut module, FileType::Object) .write_to_memory_buffer(&module, FileType::Object)
.unwrap(); .unwrap();
// TODO: remove debugging. // TODO: remove debugging.
@@ -339,15 +326,7 @@ impl FuncTranslator {
if section.sh_name == goblin::elf::section_header::SHN_UNDEF as _ { if section.sh_name == goblin::elf::section_header::SHN_UNDEF as _ {
return None; return None;
} }
let name = elf.strtab.get(section.sh_name); elf.strtab.get(section.sh_name)?.ok()
if name.is_none() {
return None;
}
let name = name.unwrap();
if name.is_err() {
return None;
}
Some(name.unwrap())
}; };
// Build up a mapping from a section to its relocation sections. // Build up a mapping from a section to its relocation sections.
@@ -1128,7 +1107,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
// Replaces any NaN with the canonical QNaN, otherwise leaves the value alone. // Replaces any NaN with the canonical QNaN, otherwise leaves the value alone.
fn canonicalize_nans(&self, value: BasicValueEnum<'ctx>) -> BasicValueEnum<'ctx> { fn canonicalize_nans(&self, value: BasicValueEnum<'ctx>) -> BasicValueEnum<'ctx> {
let f_ty = value.get_type(); let f_ty = value.get_type();
let canonicalized = if f_ty.is_vector_type() { if f_ty.is_vector_type() {
let value = value.into_vector_value(); let value = value.into_vector_value();
let f_ty = f_ty.into_vector_type(); let f_ty = f_ty.into_vector_type();
let zero = f_ty.const_zero(); let zero = f_ty.const_zero();
@@ -1154,8 +1133,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.builder self.builder
.build_select(nan_cmp, canonical_qnan, value, "") .build_select(nan_cmp, canonical_qnan, value, "")
.as_basic_value_enum() .as_basic_value_enum()
}; }
canonicalized
} }
pub fn resolve_memory_ptr( pub fn resolve_memory_ptr(
@@ -1170,9 +1148,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let intrinsics = &self.intrinsics; let intrinsics = &self.intrinsics;
let context = &self.context; let context = &self.context;
let module = &self.module; let module = &self.module;
let wasm_module = &self.wasm_module;
let function = &self.function; let function = &self.function;
let ctx = &mut self.ctx;
let memory_index = MemoryIndex::from_u32(0); let memory_index = MemoryIndex::from_u32(0);
@@ -1191,14 +1167,11 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
current_length_ptr, current_length_ptr,
} => { } => {
// Bounds check it. // Bounds check it.
let current_length = builder.build_load(current_length_ptr, "");
// TODO: tbaa_label
let minimum = memory_plans[memory_index].memory.minimum; let minimum = memory_plans[memory_index].memory.minimum;
let maximum = memory_plans[memory_index].memory.maximum;
// If the memory is dynamic, do a bounds check. For static we rely on
// the size being a multiple of the page size and hitting a guard page.
let value_size_v = intrinsics.i64_ty.const_int(value_size as u64, false); let value_size_v = intrinsics.i64_ty.const_int(value_size as u64, false);
let ptr_in_bounds = if offset.is_const() { let ptr_in_bounds = if offset.is_const() {
// When the offset is constant, if it's below the minimum
// memory size, we've statically shown that it's safe.
let load_offset_end = offset.const_add(value_size_v); let load_offset_end = offset.const_add(value_size_v);
let ptr_in_bounds = load_offset_end.const_int_compare( let ptr_in_bounds = load_offset_end.const_int_compare(
IntPredicate::ULE, IntPredicate::ULE,
@@ -1217,6 +1190,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let current_length = let current_length =
builder.build_load(current_length_ptr, "").into_int_value(); builder.build_load(current_length_ptr, "").into_int_value();
// TODO: tbaa_label
builder.build_int_compare( builder.build_int_compare(
IntPredicate::ULE, IntPredicate::ULE,
@@ -1431,7 +1405,7 @@ pub struct LLVMFunctionCodeGenerator<'ctx, 'a> {
ctx: CtxType<'ctx, 'a>, ctx: CtxType<'ctx, 'a>,
unreachable_depth: usize, unreachable_depth: usize,
memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>, memory_plans: &'a PrimaryMap<MemoryIndex, MemoryPlan>,
table_plans: &'a PrimaryMap<TableIndex, TablePlan>, _table_plans: &'a PrimaryMap<TableIndex, TablePlan>,
// This is support for stackmaps: // This is support for stackmaps:
/* /*
@@ -1447,12 +1421,7 @@ pub struct LLVMFunctionCodeGenerator<'ctx, 'a> {
} }
impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> { impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
fn translate_operator( fn translate_operator(&mut self, op: Operator, _source_loc: u32) -> Result<(), CompileError> {
&mut self,
op: Operator,
module: &ModuleInfo,
_source_loc: u32,
) -> Result<(), CompileError> {
// TODO: remove this vmctx by moving everything into CtxType. Values // TODO: remove this vmctx by moving everything into CtxType. Values
// computed off vmctx usually benefit from caching. // computed off vmctx usually benefit from caching.
let vmctx = &self.ctx.basic().into_pointer_value(); let vmctx = &self.ctx.basic().into_pointer_value();
@@ -1488,12 +1457,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
* https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#control-flow-instructions * https://github.com/sunfishcode/wasm-reference-manual/blob/master/WebAssembly.md#control-flow-instructions
***************************/ ***************************/
Operator::Block { ty } => { Operator::Block { ty } => {
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
let end_block = self.context.append_basic_block(self.function, "end"); let end_block = self.context.append_basic_block(self.function, "end");
self.builder.position_at_end(end_block); self.builder.position_at_end(end_block);
@@ -1567,12 +1534,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::Br { relative_depth } => { Operator::Br { relative_depth } => {
let frame = self.state.frame_at_depth(relative_depth)?; let frame = self.state.frame_at_depth(relative_depth)?;
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
let value_len = if frame.is_loop() { let value_len = if frame.is_loop() {
0 0
@@ -1601,12 +1566,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let cond = self.state.pop1()?; let cond = self.state.pop1()?;
let frame = self.state.frame_at_depth(relative_depth)?; let frame = self.state.frame_at_depth(relative_depth)?;
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
let value_len = if frame.is_loop() { let value_len = if frame.is_loop() {
0 0
@@ -1636,12 +1599,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.builder.position_at_end(else_block); self.builder.position_at_end(else_block);
} }
Operator::BrTable { ref table } => { Operator::BrTable { ref table } => {
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
let (label_depths, default_depth) = table.read_table().map_err(to_wasm_error)?; let (label_depths, default_depth) = table.read_table().map_err(to_wasm_error)?;
@@ -1692,12 +1653,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
self.state.reachable = false; self.state.reachable = false;
} }
Operator::If { ty } => { Operator::If { ty } => {
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
let if_then_block = self.context.append_basic_block(self.function, "if_then"); let if_then_block = self.context.append_basic_block(self.function, "if_then");
let if_else_block = self.context.append_basic_block(self.function, "if_else"); let if_else_block = self.context.append_basic_block(self.function, "if_else");
let end_block = self.context.append_basic_block(self.function, "if_end"); let end_block = self.context.append_basic_block(self.function, "if_end");
@@ -1737,12 +1696,9 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::Else => { Operator::Else => {
if self.state.reachable { if self.state.reachable {
let frame = self.state.frame_at_depth(0)?; let frame = self.state.frame_at_depth(0)?;
let current_block = let current_block = self.builder.get_insert_block().ok_or_else(|| {
self.builder CompileError::Codegen("not currently in a block".to_string())
.get_insert_block() })?;
.ok_or(CompileError::Codegen(
"not currently in a block".to_string(),
))?;
for phi in frame.phis().to_vec().iter().rev() { for phi in frame.phis().to_vec().iter().rev() {
let (value, info) = self.state.pop1_extra()?; let (value, info) = self.state.pop1_extra()?;
@@ -1772,12 +1728,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::End => { Operator::End => {
let frame = self.state.pop_frame()?; let frame = self.state.pop_frame()?;
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
if self.state.reachable { if self.state.reachable {
for phi in frame.phis().iter().rev() { for phi in frame.phis().iter().rev() {
@@ -1832,12 +1786,10 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
} }
} }
Operator::Return => { Operator::Return => {
let current_block = let current_block = self
self.builder .builder
.get_insert_block() .get_insert_block()
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("not currently in a block".to_string()))?;
"not currently in a block".to_string(),
))?;
let frame = self.state.outermost_frame()?; let frame = self.state.outermost_frame()?;
for phi in frame.phis().to_vec().iter() { for phi in frame.phis().to_vec().iter() {
@@ -2085,7 +2037,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::GlobalSet { global_index } => { Operator::GlobalSet { global_index } => {
let global_index = GlobalIndex::from_u32(global_index); let global_index = GlobalIndex::from_u32(global_index);
match self.ctx.global(global_index, self.intrinsics) { match self.ctx.global(global_index, self.intrinsics) {
GlobalCache::Const { value } => { GlobalCache::Const { value: _ } => {
return Err(CompileError::Codegen(format!( return Err(CompileError::Codegen(format!(
"global.set on immutable global index {}", "global.set on immutable global index {}",
global_index.as_u32() global_index.as_u32()
@@ -2144,23 +2096,22 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
} }
Operator::Call { function_index } => { Operator::Call { function_index } => {
let func_index = FunctionIndex::from_u32(function_index); let func_index = FunctionIndex::from_u32(function_index);
let sigindex = &module.functions[func_index]; let sigindex = &self.wasm_module.functions[func_index];
let func_type = &module.signatures[*sigindex]; let func_type = &self.wasm_module.signatures[*sigindex];
let func_name = &self.func_names[func_index]; let func_name = &self.func_names[func_index];
let llvm_func_type = func_type_to_llvm(&self.context, &self.intrinsics, func_type); let llvm_func_type = func_type_to_llvm(&self.context, &self.intrinsics, func_type);
let (func, callee_vmctx) = if let Some(local_func_index) = let (func, callee_vmctx) = if self
module.local_func_index(func_index) .wasm_module
.local_func_index(func_index)
.is_some()
{ {
// TODO: we could do this by comparing self.function indices instead // TODO: we could do this by comparing self.function indices instead
// of going through LLVM APIs and string comparisons. // of going through LLVM APIs and string comparisons.
let func = self.module.get_function(func_name); let func = self.module.get_function(func_name).unwrap_or_else(|| {
let func = if func.is_none() {
self.module self.module
.add_function(func_name, llvm_func_type, Some(Linkage::External)) .add_function(func_name, llvm_func_type, Some(Linkage::External))
} else { });
func.unwrap()
};
(func.as_global_value().as_pointer_value(), self.ctx.basic()) (func.as_global_value().as_pointer_value(), self.ctx.basic())
} else { } else {
let offset = self.vmoffsets.vmctx_vmfunction_import(func_index); let offset = self.vmoffsets.vmctx_vmfunction_import(func_index);
@@ -2269,7 +2220,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
if let Some(basic_value) = call_site.try_as_basic_value().left() { if let Some(basic_value) = call_site.try_as_basic_value().left() {
match func_type.results().len() { match func_type.results().len() {
1 => self.state.push1(basic_value), 1 => self.state.push1(basic_value),
count @ _ => { count => {
// This is a multi-value return. // This is a multi-value return.
let struct_value = basic_value.into_struct_value(); let struct_value = basic_value.into_struct_value();
for i in 0..(count as u32) { for i in 0..(count as u32) {
@@ -2285,14 +2236,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
} }
Operator::CallIndirect { index, table_index } => { Operator::CallIndirect { index, table_index } => {
let sigindex = SignatureIndex::from_u32(index); let sigindex = SignatureIndex::from_u32(index);
let func_type = &module.signatures[sigindex]; let func_type = &self.wasm_module.signatures[sigindex];
let expected_dynamic_sigindex = let expected_dynamic_sigindex =
self.ctx.dynamic_sigindex(sigindex, self.intrinsics); self.ctx.dynamic_sigindex(sigindex, self.intrinsics);
let (table_base, table_bound) = self.ctx.table( let (table_base, table_bound) = self.ctx.table(
TableIndex::from_u32(table_index), TableIndex::from_u32(table_index),
self.intrinsics, self.intrinsics,
self.module, self.module,
&self.builder,
); );
let func_index = self.state.pop1()?.into_int_value(); let func_index = self.state.pop1()?.into_int_value();
@@ -2312,32 +2262,30 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
}; };
// Load things from the anyfunc data structure. // Load things from the anyfunc data structure.
let (func_ptr, found_dynamic_sigindex, ctx_ptr) = unsafe { let (func_ptr, found_dynamic_sigindex, ctx_ptr) = (
( self.builder
self.builder .build_load(
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 0, "func_ptr_ptr")
.unwrap(),
"func_ptr",
)
.into_pointer_value(),
self.builder
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 1, "sigindex_ptr")
.unwrap(),
"sigindex",
)
.into_int_value(),
self.builder.build_load(
self.builder self.builder
.build_struct_gep(anyfunc_struct_ptr, 2, "ctx_ptr_ptr") .build_struct_gep(anyfunc_struct_ptr, 0, "func_ptr_ptr")
.unwrap(), .unwrap(),
"ctx_ptr", "func_ptr",
), )
) .into_pointer_value(),
}; self.builder
.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 1, "sigindex_ptr")
.unwrap(),
"sigindex",
)
.into_int_value(),
self.builder.build_load(
self.builder
.build_struct_gep(anyfunc_struct_ptr, 2, "ctx_ptr_ptr")
.unwrap(),
"ctx_ptr",
),
);
let truncated_table_bounds = self.builder.build_int_truncate( let truncated_table_bounds = self.builder.build_int_truncate(
table_bound, table_bound,
@@ -8443,7 +8391,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
let mem_index = MemoryIndex::from_u32(reserved); let mem_index = MemoryIndex::from_u32(reserved);
let delta = self.state.pop1()?; let delta = self.state.pop1()?;
let (grow_fn, grow_fn_ty) = let (grow_fn, grow_fn_ty) =
if let Some(local_mem_index) = module.local_memory_index(mem_index) { if self.wasm_module.local_memory_index(mem_index).is_some() {
( (
VMBuiltinFunctionIndex::get_memory32_grow_index(), VMBuiltinFunctionIndex::get_memory32_grow_index(),
self.intrinsics.memory32_grow_ptr_ty, self.intrinsics.memory32_grow_ptr_ty,
@@ -8487,7 +8435,7 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::MemorySize { reserved } => { Operator::MemorySize { reserved } => {
let mem_index = MemoryIndex::from_u32(reserved); let mem_index = MemoryIndex::from_u32(reserved);
let (size_fn, size_fn_ty) = let (size_fn, size_fn_ty) =
if let Some(local_mem_index) = module.local_memory_index(mem_index) { if self.wasm_module.local_memory_index(mem_index).is_some() {
( (
VMBuiltinFunctionIndex::get_memory32_size_index(), VMBuiltinFunctionIndex::get_memory32_size_index(),
self.intrinsics.memory32_size_ptr_ty, self.intrinsics.memory32_size_ptr_ty,

View File

@@ -22,8 +22,8 @@ use inkwell::{
use std::collections::HashMap; use std::collections::HashMap;
use wasm_common::entity::{EntityRef, PrimaryMap}; use wasm_common::entity::{EntityRef, PrimaryMap};
use wasm_common::{ use wasm_common::{
FunctionIndex, FunctionType as FuncType, GlobalIndex, MemoryIndex, Mutability, Pages, FunctionType as FuncType, GlobalIndex, MemoryIndex, Mutability, SignatureIndex, TableIndex,
SignatureIndex, TableIndex, Type, Type,
}; };
use wasmer_runtime::ModuleInfo as WasmerCompilerModule; use wasmer_runtime::ModuleInfo as WasmerCompilerModule;
use wasmer_runtime::{MemoryPlan, MemoryStyle, TrapCode, VMOffsets}; use wasmer_runtime::{MemoryPlan, MemoryStyle, TrapCode, VMOffsets};
@@ -230,25 +230,7 @@ impl<'ctx> Intrinsics<'ctx> {
let ctx_ty = i8_ty; let ctx_ty = i8_ty;
let ctx_ptr_ty = ctx_ty.ptr_type(AddressSpace::Generic); let ctx_ptr_ty = ctx_ty.ptr_type(AddressSpace::Generic);
let local_memory_ty =
context.struct_type(&[i8_ptr_ty_basic, i64_ty_basic, i8_ptr_ty_basic], false);
let local_table_ty = local_memory_ty;
let local_global_ty = i64_ty;
let func_ctx_ty =
context.struct_type(&[ctx_ptr_ty.as_basic_type_enum(), i8_ptr_ty_basic], false);
let func_ctx_ptr_ty = func_ctx_ty.ptr_type(AddressSpace::Generic);
let imported_func_ty = context.struct_type(
&[i8_ptr_ty_basic, func_ctx_ptr_ty.as_basic_type_enum()],
false,
);
let sigindex_ty = i32_ty; let sigindex_ty = i32_ty;
let rt_intrinsics_ty = i8_ty;
let stack_lower_bound_ty = i8_ty;
let memory_base_ty = i8_ty;
let memory_bound_ty = i8_ty;
let internals_ty = i64_ty;
let interrupt_signal_mem_ty = i8_ty;
let local_function_ty = i8_ptr_ty;
let anyfunc_ty = context.struct_type( let anyfunc_ty = context.struct_type(
&[ &[
@@ -613,24 +595,16 @@ pub enum GlobalCache<'ctx> {
Const { value: BasicValueEnum<'ctx> }, Const { value: BasicValueEnum<'ctx> },
} }
struct ImportedFuncCache<'ctx> {
func_ptr: PointerValue<'ctx>,
ctx_ptr: PointerValue<'ctx>,
}
pub struct CtxType<'ctx, 'a> { pub struct CtxType<'ctx, 'a> {
ctx_ptr_value: PointerValue<'ctx>, ctx_ptr_value: PointerValue<'ctx>,
wasm_module: &'a WasmerCompilerModule, wasm_module: &'a WasmerCompilerModule,
cache_builder: &'a Builder<'ctx>, cache_builder: &'a Builder<'ctx>,
cached_signal_mem: Option<PointerValue<'ctx>>,
cached_memories: HashMap<MemoryIndex, MemoryCache<'ctx>>, cached_memories: HashMap<MemoryIndex, MemoryCache<'ctx>>,
cached_tables: HashMap<TableIndex, TableCache<'ctx>>, cached_tables: HashMap<TableIndex, TableCache<'ctx>>,
cached_sigindices: HashMap<SignatureIndex, IntValue<'ctx>>, cached_sigindices: HashMap<SignatureIndex, IntValue<'ctx>>,
cached_globals: HashMap<GlobalIndex, GlobalCache<'ctx>>, cached_globals: HashMap<GlobalIndex, GlobalCache<'ctx>>,
cached_imported_functions: HashMap<FunctionIndex, ImportedFuncCache<'ctx>>,
offsets: VMOffsets, offsets: VMOffsets,
} }
@@ -647,13 +621,10 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
wasm_module, wasm_module,
cache_builder, cache_builder,
cached_signal_mem: None,
cached_memories: HashMap::new(), cached_memories: HashMap::new(),
cached_tables: HashMap::new(), cached_tables: HashMap::new(),
cached_sigindices: HashMap::new(), cached_sigindices: HashMap::new(),
cached_globals: HashMap::new(), cached_globals: HashMap::new(),
cached_imported_functions: HashMap::new(),
// TODO: pointer width // TODO: pointer width
offsets: VMOffsets::new(8, &wasm_module), offsets: VMOffsets::new(8, &wasm_module),
@@ -668,7 +639,7 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
&mut self, &mut self,
index: MemoryIndex, index: MemoryIndex,
intrinsics: &Intrinsics<'ctx>, intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>, _module: &Module<'ctx>,
memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>, memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
) -> MemoryCache<'ctx> { ) -> MemoryCache<'ctx> {
let (cached_memories, wasm_module, ctx_ptr_value, cache_builder, offsets) = ( let (cached_memories, wasm_module, ctx_ptr_value, cache_builder, offsets) = (
@@ -740,7 +711,7 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
&mut self, &mut self,
table_index: TableIndex, table_index: TableIndex,
intrinsics: &Intrinsics<'ctx>, intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>, _module: &Module<'ctx>,
) -> (PointerValue<'ctx>, PointerValue<'ctx>) { ) -> (PointerValue<'ctx>, PointerValue<'ctx>) {
let (cached_tables, wasm_module, ctx_ptr_value, cache_builder, offsets) = ( let (cached_tables, wasm_module, ctx_ptr_value, cache_builder, offsets) = (
&mut self.cached_tables, &mut self.cached_tables,
@@ -837,7 +808,6 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
index: TableIndex, index: TableIndex,
intrinsics: &Intrinsics<'ctx>, intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>, module: &Module<'ctx>,
builder: &Builder<'ctx>,
) -> (PointerValue<'ctx>, IntValue<'ctx>) { ) -> (PointerValue<'ctx>, IntValue<'ctx>) {
let (ptr_to_base_ptr, ptr_to_bounds) = self.table_prepare(index, intrinsics, module); let (ptr_to_base_ptr, ptr_to_bounds) = self.table_prepare(index, intrinsics, module);
let base_ptr = self let base_ptr = self
@@ -1048,7 +1018,7 @@ pub fn func_type_to_llvm<'ctx>(
match fntype.results() { match fntype.results() {
&[] => intrinsics.void_ty.fn_type(&param_types, false), &[] => intrinsics.void_ty.fn_type(&param_types, false),
&[single_value] => type_to_llvm(intrinsics, single_value).fn_type(&param_types, false), &[single_value] => type_to_llvm(intrinsics, single_value).fn_type(&param_types, false),
returns @ _ => { returns => {
let basic_types: Vec<_> = returns let basic_types: Vec<_> = returns
.iter() .iter()
.map(|&ty| type_to_llvm(intrinsics, ty)) .map(|&ty| type_to_llvm(intrinsics, ty))

View File

@@ -14,22 +14,18 @@ fn wp_type_to_type(ty: WpType) -> Result<Type, CompileError> {
WpType::F32 => Ok(Type::F32), WpType::F32 => Ok(Type::F32),
WpType::F64 => Ok(Type::F64), WpType::F64 => Ok(Type::F64),
WpType::V128 => Ok(Type::V128), WpType::V128 => Ok(Type::V128),
_ => { _ => Err(CompileError::Codegen(
return Err(CompileError::Codegen( "broken invariant, invalid type".to_string(),
"broken invariant, invalid type".to_string(), )),
));
}
} }
} }
pub fn blocktype_to_type(ty: WpTypeOrFuncType) -> Result<Type, CompileError> { pub fn blocktype_to_type(ty: WpTypeOrFuncType) -> Result<Type, CompileError> {
match ty { match ty {
WpTypeOrFuncType::Type(inner_ty) => Ok(wp_type_to_type(inner_ty)?), WpTypeOrFuncType::Type(inner_ty) => Ok(wp_type_to_type(inner_ty)?),
_ => { _ => Err(CompileError::Codegen(
return Err(CompileError::Codegen( "the wasmer llvm backend does not yet support the multi-value return extension"
"the wasmer llvm backend does not yet support the multi-value return extension" .to_string(),
.to_string(), )),
));
}
} }
} }

View File

@@ -181,13 +181,12 @@ impl BitAnd for ExtraInfo {
(false, true) => ExtraInfo::arithmetic_f64(), (false, true) => ExtraInfo::arithmetic_f64(),
(true, true) => ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(), (true, true) => ExtraInfo::arithmetic_f32() | ExtraInfo::arithmetic_f64(),
}; };
let info = match (self.has_pending_f32_nan(), self.has_pending_f64_nan()) { match (self.has_pending_f32_nan(), self.has_pending_f64_nan()) {
(false, false) => info, (false, false) => info,
(true, false) => info | ExtraInfo::pending_f32_nan(), (true, false) => info | ExtraInfo::pending_f32_nan(),
(false, true) => info | ExtraInfo::pending_f64_nan(), (false, true) => info | ExtraInfo::pending_f64_nan(),
(true, true) => unreachable!("Can't form ExtraInfo with two pending canonicalizations"), (true, true) => unreachable!("Can't form ExtraInfo with two pending canonicalizations"),
}; }
info
} }
} }
@@ -209,7 +208,7 @@ impl<'ctx> State<'ctx> {
} }
pub fn has_control_frames(&self) -> bool { pub fn has_control_frames(&self) -> bool {
return !self.control_stack.is_empty(); !self.control_stack.is_empty()
} }
pub fn reset_stack(&mut self, frame: &ControlFrame<'ctx>) { pub fn reset_stack(&mut self, frame: &ControlFrame<'ctx>) {
@@ -231,9 +230,9 @@ impl<'ctx> State<'ctx> {
} }
pub fn outermost_frame(&self) -> Result<&ControlFrame<'ctx>, CompileError> { pub fn outermost_frame(&self) -> Result<&ControlFrame<'ctx>, CompileError> {
self.control_stack.get(0).ok_or(CompileError::Codegen( self.control_stack.get(0).ok_or_else(|| {
"outermost_frame: invalid control stack depth".to_string(), CompileError::Codegen("outermost_frame: invalid control stack depth".to_string())
)) })
} }
pub fn frame_at_depth(&self, depth: u32) -> Result<&ControlFrame<'ctx>, CompileError> { pub fn frame_at_depth(&self, depth: u32) -> Result<&ControlFrame<'ctx>, CompileError> {
@@ -241,9 +240,9 @@ impl<'ctx> State<'ctx> {
.control_stack .control_stack
.len() .len()
.checked_sub(1 + (depth as usize)) .checked_sub(1 + (depth as usize))
.ok_or(CompileError::Codegen( .ok_or_else(|| {
"frame_at_depth: invalid control stack depth".to_string(), CompileError::Codegen("frame_at_depth: invalid control stack depth".to_string())
))?; })?;
Ok(&self.control_stack[index]) Ok(&self.control_stack[index])
} }
@@ -255,16 +254,16 @@ impl<'ctx> State<'ctx> {
.control_stack .control_stack
.len() .len()
.checked_sub(1 + (depth as usize)) .checked_sub(1 + (depth as usize))
.ok_or(CompileError::Codegen( .ok_or_else(|| {
"frame_at_depth_mut: invalid control stack depth".to_string(), CompileError::Codegen("frame_at_depth_mut: invalid control stack depth".to_string())
))?; })?;
Ok(&mut self.control_stack[index]) Ok(&mut self.control_stack[index])
} }
pub fn pop_frame(&mut self) -> Result<ControlFrame<'ctx>, CompileError> { pub fn pop_frame(&mut self) -> Result<ControlFrame<'ctx>, CompileError> {
self.control_stack.pop().ok_or(CompileError::Codegen( self.control_stack.pop().ok_or_else(|| {
"pop_frame: cannot pop from control stack".to_string(), CompileError::Codegen("pop_frame: cannot pop from control stack".to_string())
)) })
} }
pub fn push1<T: BasicValue<'ctx>>(&mut self, value: T) { pub fn push1<T: BasicValue<'ctx>>(&mut self, value: T) {
@@ -280,9 +279,9 @@ impl<'ctx> State<'ctx> {
} }
pub fn pop1_extra(&mut self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> { pub fn pop1_extra(&mut self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> {
self.stack.pop().ok_or(CompileError::Codegen( self.stack
"pop1_extra: invalid value stack".to_string(), .pop()
)) .ok_or_else(|| CompileError::Codegen("pop1_extra: invalid value stack".to_string()))
} }
pub fn pop2(&mut self) -> Result<(BasicValueEnum<'ctx>, BasicValueEnum<'ctx>), CompileError> { pub fn pop2(&mut self) -> Result<(BasicValueEnum<'ctx>, BasicValueEnum<'ctx>), CompileError> {
@@ -322,13 +321,10 @@ impl<'ctx> State<'ctx> {
} }
pub fn peek1_extra(&self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> { pub fn peek1_extra(&self) -> Result<(BasicValueEnum<'ctx>, ExtraInfo), CompileError> {
let index = self let index =
.stack self.stack.len().checked_sub(1).ok_or_else(|| {
.len() CompileError::Codegen("peek1_extra: invalid value stack".to_string())
.checked_sub(1) })?;
.ok_or(CompileError::Codegen(
"peek1_extra: invalid value stack".to_string(),
))?;
Ok(self.stack[index]) Ok(self.stack[index])
} }
@@ -340,13 +336,10 @@ impl<'ctx> State<'ctx> {
&self, &self,
n: usize, n: usize,
) -> Result<&[(BasicValueEnum<'ctx>, ExtraInfo)], CompileError> { ) -> Result<&[(BasicValueEnum<'ctx>, ExtraInfo)], CompileError> {
let index = self let index =
.stack self.stack.len().checked_sub(n).ok_or_else(|| {
.len() CompileError::Codegen("peekn_extra: invalid value stack".to_string())
.checked_sub(n) })?;
.ok_or(CompileError::Codegen(
"peekn_extra: invalid value stack".to_string(),
))?;
Ok(&self.stack[index..]) Ok(&self.stack[index..])
} }
@@ -364,9 +357,7 @@ impl<'ctx> State<'ctx> {
.stack .stack
.len() .len()
.checked_sub(n) .checked_sub(n)
.ok_or(CompileError::Codegen( .ok_or_else(|| CompileError::Codegen("popn: invalid value stack".to_string()))?;
"popn: invalid value stack".to_string(),
))?;
self.stack.truncate(index); self.stack.truncate(index);
Ok(()) Ok(())