Merge branch 'master' into singlepass

# Conflicts:
#	lib/compiler-singlepass/src/codegen_x64.rs
#	lib/compiler-singlepass/src/common_decl.rs
#	lib/compiler-singlepass/src/emitter_x64.rs
#	lib/compiler-singlepass/src/lib.rs
#	lib/compiler-singlepass/src/machine.rs
#	lib/engine-jit/src/engine.rs
#	lib/engine-jit/src/serialize.rs
Syrus
2020-05-18 15:14:44 -07:00
31 changed files with 917 additions and 435 deletions


@@ -41,7 +41,7 @@ maintenance = { status = "actively-developed" }
default = ["wat", "cranelift", "jit"]
compiler = ["wasmer-engine-jit/compiler"]
engine = []
jit = ["wasmer-engine-jit"]
jit = ["wasmer-engine-jit", "engine"]
singlepass = [
"wasmer-compiler-singlepass",
"compiler",


@@ -7,11 +7,14 @@ use crate::RuntimeError;
use crate::{ExternType, FunctionType, GlobalType, MemoryType, TableType, ValType};
use std::cmp::max;
use std::slice;
use wasm_common::{HostFunction, Pages, ValueType, WasmTypeList, WithEnv, WithoutEnv};
use wasm_common::{
HostFunction, Pages, SignatureIndex, ValueType, WasmTypeList, WithEnv, WithoutEnv,
};
use wasmer_runtime::{
wasmer_call_trampoline, Export, ExportFunction, ExportGlobal, ExportMemory, ExportTable,
LinearMemory, MemoryError, Table as RuntimeTable, VMCallerCheckedAnyfunc, VMContext,
VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition, VMTrampoline,
InstanceHandle, LinearMemory, MemoryError, Table as RuntimeTable, VMCallerCheckedAnyfunc,
VMContext, VMDynamicFunctionImportContext, VMFunctionBody, VMFunctionKind, VMGlobalDefinition,
VMMemoryDefinition, VMTrampoline,
};
#[derive(Clone)]
@@ -477,32 +480,26 @@ impl Drop for Memory {
/// A function defined in the Wasm module
#[derive(Clone, PartialEq)]
pub struct WasmFunc {
pub struct WasmFunctionDefinition {
// The trampoline to do the call
trampoline: VMTrampoline,
}
/// A function defined in the Host
#[derive(Clone, PartialEq)]
pub struct HostFunc {
// func: wasm_common::Func<Args, Rets>,
}
/// The inner helper
#[derive(Clone, PartialEq)]
pub enum InnerFunc {
pub enum FunctionDefinition {
/// A function defined in the Wasm side
Wasm(WasmFunc),
Wasm(WasmFunctionDefinition),
/// A function defined in the Host side
Host(HostFunc),
Host,
}
/// A WebAssembly `function`.
#[derive(Clone, PartialEq)]
pub struct Function {
store: Store,
definition: FunctionDefinition,
// If the Function is owned by the Store, not the instance
inner: InnerFunc,
owned_by_store: bool,
exported: ExportFunction,
}
@@ -519,21 +516,71 @@ impl Function {
Rets: WasmTypeList,
Env: Sized,
{
let func: wasm_common::Func<Args, Rets, Env> = wasm_common::Func::new(func);
let func: wasm_common::Func<Args, Rets> = wasm_common::Func::new(func);
let address = func.address() as *const VMFunctionBody;
let vmctx = (func.env().unwrap_or(std::ptr::null_mut()) as *mut _) as *mut VMContext;
let vmctx = std::ptr::null_mut() as *mut _ as *mut VMContext;
let func_type = func.ty();
let signature = store.engine().register_signature(&func_type);
Self {
store: store.clone(),
owned_by_store: true,
inner: InnerFunc::Host(HostFunc {
// func
}),
definition: FunctionDefinition::Host,
exported: ExportFunction {
address,
vmctx,
signature,
kind: VMFunctionKind::Static,
},
}
}
#[allow(clippy::cast_ptr_alignment)]
pub fn new_dynamic<F>(store: &Store, ty: &FunctionType, func: F) -> Self
where
F: Fn(&[Val]) -> Result<Vec<Val>, RuntimeError> + 'static,
{
let dynamic_ctx =
VMDynamicFunctionImportContext::from_context(VMDynamicFunctionWithoutEnv {
func: Box::new(func),
});
let address = std::ptr::null() as *const () as *const VMFunctionBody;
let vmctx = Box::leak(Box::new(dynamic_ctx)) as *mut _ as *mut VMContext;
let signature = store.engine().register_signature(&ty);
Self {
store: store.clone(),
owned_by_store: true,
definition: FunctionDefinition::Host,
exported: ExportFunction {
address,
kind: VMFunctionKind::Dynamic,
vmctx,
signature,
},
}
}
#[allow(clippy::cast_ptr_alignment)]
pub fn new_dynamic_env<F, Env>(store: &Store, ty: &FunctionType, env: &mut Env, func: F) -> Self
where
F: Fn(&mut Env, &[Val]) -> Result<Vec<Val>, RuntimeError> + 'static,
Env: Sized,
{
let dynamic_ctx = VMDynamicFunctionImportContext::from_context(VMDynamicFunctionWithEnv {
env,
func: Box::new(func),
});
let address = std::ptr::null() as *const () as *const VMFunctionBody;
let vmctx = Box::leak(Box::new(dynamic_ctx)) as *mut _ as *mut VMContext;
let signature = store.engine().register_signature(&ty);
Self {
store: store.clone(),
owned_by_store: true,
definition: FunctionDefinition::Host,
exported: ExportFunction {
address,
kind: VMFunctionKind::Dynamic,
vmctx,
signature,
},
}
}
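
For orientation, a hedged sketch of how these two constructors might be used from the embedder API (illustrative only; `Store::default`, `FunctionType::new`, `Type::I32`, and the `Val::I32` variant are assumed from the surrounding crates):

// Illustrative only: creating dynamic host functions with and without an env.
let store = Store::default();
let ty = FunctionType::new(vec![Type::I32], vec![Type::I32]);

let add_one = Function::new_dynamic(&store, &ty, |args: &[Val]| match args[0] {
    Val::I32(x) => Ok(vec![Val::I32(x + 1)]),
    _ => Err(RuntimeError::new("expected an i32 argument")),
});

let mut counter: i32 = 0;
let bump = Function::new_dynamic_env(&store, &ty, &mut counter, |env: &mut i32, _args| {
    *env += 1;
    Ok(vec![Val::I32(*env)])
});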
@@ -550,19 +597,23 @@ impl Function {
Rets: WasmTypeList,
Env: Sized,
{
let func: wasm_common::Func<Args, Rets, Env> = wasm_common::Func::new_env(env, func);
let func: wasm_common::Func<Args, Rets> = wasm_common::Func::new(func);
let address = func.address() as *const VMFunctionBody;
let vmctx = (func.env().unwrap_or(std::ptr::null_mut()) as *mut _) as *mut VMContext;
// TODO: We need to refactor the Function context.
// Right now it is structured as if it were always a `VMContext`. However,
// only Wasm-defined functions have a `VMContext`.
// In the case of host-defined functions, `VMContext` is whatever environment
// the user wants to attach to the function.
let vmctx = env as *mut _ as *mut VMContext;
let func_type = func.ty();
let signature = store.engine().register_signature(&func_type);
Self {
store: store.clone(),
owned_by_store: true,
inner: InnerFunc::Host(HostFunc {
// func
}),
definition: FunctionDefinition::Host,
exported: ExportFunction {
address,
kind: VMFunctionKind::Static,
vmctx,
signature,
},
@@ -584,7 +635,7 @@ impl Function {
fn call_wasm(
&self,
func: &WasmFunc,
func: &WasmFunctionDefinition,
params: &[Val],
results: &mut [Val],
) -> Result<(), RuntimeError> {
@@ -675,8 +726,8 @@ impl Function {
/// call the trampoline.
pub fn call(&self, params: &[Val]) -> Result<Box<[Val]>, RuntimeError> {
let mut results = vec![Val::null(); self.result_arity()];
match &self.inner {
InnerFunc::Wasm(wasm) => {
match &self.definition {
FunctionDefinition::Wasm(wasm) => {
self.call_wasm(&wasm, params, &mut results)?;
}
_ => {} // _ => unimplemented!("The host is unimplemented"),
@@ -685,11 +736,14 @@ impl Function {
}
pub(crate) fn from_export(store: &Store, wasmer_export: ExportFunction) -> Self {
let trampoline = store.engine().trampoline(wasmer_export.signature).unwrap();
let trampoline = store
.engine()
.function_call_trampoline(wasmer_export.signature)
.unwrap();
Self {
store: store.clone(),
owned_by_store: false,
inner: InnerFunc::Wasm(WasmFunc { trampoline }),
definition: FunctionDefinition::Wasm(WasmFunctionDefinition { trampoline }),
exported: wasmer_export,
}
}
@@ -720,3 +774,113 @@ impl std::fmt::Debug for Function {
Ok(())
}
}
/// This trait is one that all dynamic functions must fulfill.
trait VMDynamicFunction {
fn call(&self, args: &[Val]) -> Result<Vec<Val>, RuntimeError>;
}
struct VMDynamicFunctionWithoutEnv {
func: Box<dyn Fn(&[Val]) -> Result<Vec<Val>, RuntimeError> + 'static>,
}
impl VMDynamicFunction for VMDynamicFunctionWithoutEnv {
fn call(&self, args: &[Val]) -> Result<Vec<Val>, RuntimeError> {
(*self.func)(&args)
}
}
struct VMDynamicFunctionWithEnv<Env>
where
Env: Sized,
{
func: Box<dyn Fn(&mut Env, &[Val]) -> Result<Vec<Val>, RuntimeError> + 'static>,
env: *mut Env,
}
impl<Env> VMDynamicFunction for VMDynamicFunctionWithEnv<Env>
where
Env: Sized,
{
fn call(&self, args: &[Val]) -> Result<Vec<Val>, RuntimeError> {
unsafe { (*self.func)(&mut *self.env, &args) }
}
}
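
The two context types above are a standard type-erasure pattern: a concrete struct that owns a boxed closure and exposes it through a shared trait. A minimal, self-contained sketch of the same idea (illustrative names, with plain `i32`s standing in for `Val`):

// Standalone sketch of the erasure pattern behind the two context types.
trait DynCall {
    fn call(&self, args: &[i32]) -> Vec<i32>;
}

struct WithoutEnv {
    func: Box<dyn Fn(&[i32]) -> Vec<i32> + 'static>,
}

impl DynCall for WithoutEnv {
    fn call(&self, args: &[i32]) -> Vec<i32> {
        (self.func)(args)
    }
}

fn main() {
    let f = WithoutEnv {
        func: Box::new(|args| args.iter().map(|x| x + 1).collect()),
    };
    assert_eq!(f.call(&[1, 2]), vec![2, 3]);
}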
trait VMDynamicFunctionImportCall<T: VMDynamicFunction> {
fn from_context(ctx: T) -> Self;
fn address_ptr() -> *const VMFunctionBody;
unsafe fn func_wrapper(
&self,
caller_vmctx: *mut VMContext,
sig_index: SignatureIndex,
values_vec: *mut i128,
);
}
impl<T: VMDynamicFunction> VMDynamicFunctionImportCall<T> for VMDynamicFunctionImportContext<T> {
fn from_context(ctx: T) -> Self {
Self {
address: Self::address_ptr(),
ctx,
}
}
fn address_ptr() -> *const VMFunctionBody {
Self::func_wrapper as *const () as *const VMFunctionBody
}
// This function wraps our func, to make it compatible with the
// reverse trampoline signature
unsafe fn func_wrapper(
// Note: we use the trick that the first param to this function is the `VMDynamicFunctionImportContext`
// itself, so rather than doing `dynamic_ctx: &VMDynamicFunctionImportContext<T>`, we simplify it a bit
&self,
caller_vmctx: *mut VMContext,
sig_index: SignatureIndex,
values_vec: *mut i128,
) {
use std::panic::{self, AssertUnwindSafe};
let result = panic::catch_unwind(AssertUnwindSafe(|| {
// This is actually safe, since right now the function signature
// receives two contexts:
// 1. `vmctx`: the context associated with where the function is defined.
// It will be a `VMContext` if the function is defined in Wasm, and a
// custom `Env` if it is host-defined.
// 2. `caller_vmctx`: the context associated with whoever is calling the function.
//
// Because this code is only reached when calling from Wasm to the host, we
// can be sure that `caller_vmctx` is indeed a `VMContext`, and hence it is
// completely safe to get a handle from it.
let handle = InstanceHandle::from_vmctx(caller_vmctx);
let module = handle.module_ref();
let func_ty = &module.signatures[sig_index];
let mut args = Vec::with_capacity(func_ty.params().len());
for (i, ty) in func_ty.params().iter().enumerate() {
args.push(Val::read_value_from(values_vec.add(i), *ty));
}
let returns = self.ctx.call(&args)?;
// We need to dynamically check that the returns
// match the expected types, as well as expected length.
let return_types = returns.iter().map(|ret| ret.ty()).collect::<Vec<_>>();
if return_types != func_ty.results() {
return Err(RuntimeError::new(format!(
"Dynamic function returned wrong signature. Expected {:?} but got {:?}",
func_ty.results(),
return_types
)));
}
for (i, ret) in returns.iter().enumerate() {
ret.write_value_to(values_vec.add(i));
}
Ok(())
}));
match result {
Ok(Ok(())) => {}
Ok(Err(trap)) => wasmer_runtime::raise_user_trap(Box::new(trap)),
Err(panic) => wasmer_runtime::resume_panic(panic),
}
}
}
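
`func_wrapper` wraps the host call in `catch_unwind` because a Rust panic must never unwind across the machine-code trampoline boundary. A minimal standalone sketch of that discipline (the trap/panic plumbing is reduced to return codes here):

use std::panic::{self, AssertUnwindSafe};

// Minimal sketch: turn panics and errors into plain values before returning
// to foreign code, mirroring the match at the end of func_wrapper.
unsafe extern "C" fn ffi_entry() -> i32 {
    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        // Host logic that may return an error or panic.
        Ok::<i32, String>(42)
    }));
    match result {
        Ok(Ok(value)) => value,
        Ok(Err(_trap)) => -1, // the real code raises a user trap instead
        Err(_panic) => -2,    // the real code resumes the panic on the other side
    }
}

fn main() {
    println!("{}", unsafe { ffi_entry() });
}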


@@ -272,7 +272,7 @@ mod test {
// create a memory
let store = Store::default();
let memory_descriptor = MemoryType::new(1, Some(1), false);
let memory = Memory::new(&store, memory_descriptor);
let memory = Memory::new(&store, memory_descriptor).unwrap();
// test that basic access works and that len = 0 works, but oob does not
let start_wasm_ptr: WasmPtr<u8> = WasmPtr::new(0);


@@ -64,6 +64,9 @@ impl ValAnyFunc for Val {
let export = wasmer_runtime::ExportFunction {
address: item.func_ptr,
signature: item.type_index,
// All functions in tables are already Static (as dynamic functions
// are converted to use the trampolines with static signatures).
kind: wasmer_runtime::VMFunctionKind::Static,
vmctx: item.vmctx,
};
let f = Function::from_export(store, export);


@@ -4,7 +4,9 @@ use crate::address_map::get_function_address_map;
use crate::config::CraneliftConfig;
use crate::func_environ::{get_func_name, FuncEnvironment};
use crate::sink::{RelocSink, TrapSink};
use crate::trampoline::{make_wasm_trampoline, FunctionBuilderContext};
use crate::trampoline::{
make_trampoline_dynamic_function, make_trampoline_function_call, FunctionBuilderContext,
};
use crate::translator::{
compiled_function_unwind_info, signature_to_cranelift_ir, transform_jump_table, FuncTranslator,
};
@@ -14,7 +16,8 @@ use cranelift_codegen::{binemit, isa, Context};
use rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
use wasm_common::entity::PrimaryMap;
use wasm_common::{
Features, FunctionType, LocalFunctionIndex, MemoryIndex, SignatureIndex, TableIndex,
Features, FunctionIndex, FunctionType, LocalFunctionIndex, MemoryIndex, SignatureIndex,
TableIndex,
};
use wasmer_compiler::CompileError;
use wasmer_compiler::{
@@ -155,15 +158,37 @@ impl Compiler for CraneliftCompiler {
Ok(Compilation::new(functions, custom_sections))
}
fn compile_wasm_trampolines(
fn compile_function_call_trampolines(
&self,
signatures: &[FunctionType],
) -> Result<Vec<FunctionBody>, CompileError> {
signatures
.par_iter()
.map_init(FunctionBuilderContext::new, |mut cx, sig| {
make_wasm_trampoline(&*self.isa, &mut cx, sig, std::mem::size_of::<u128>())
make_trampoline_function_call(&*self.isa, &mut cx, sig)
})
.collect::<Result<Vec<_>, CompileError>>()
}
fn compile_dynamic_function_trampolines(
&self,
module: &Module,
) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError> {
use wasmer_runtime::VMOffsets;
let isa = self.isa();
let frontend_config = isa.frontend_config();
let offsets = VMOffsets::new(frontend_config.pointer_bytes(), module);
Ok(module
.functions
.values()
.take(module.num_imported_funcs)
.collect::<Vec<_>>()
.par_iter()
.map_init(FunctionBuilderContext::new, |mut cx, sig_index| {
make_trampoline_dynamic_function(&*self.isa, &module, &offsets, &mut cx, &sig_index)
})
.collect::<Result<Vec<_>, CompileError>>()?
.into_iter()
.collect::<PrimaryMap<FunctionIndex, FunctionBody>>())
}
}
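
The `.take(module.num_imported_funcs)` above relies on the Wasm function index space listing all imported functions before local ones, so trampolines are generated exactly for the imports (the only functions that can be dynamic host functions). A tiny hedged illustration of that invariant:

fn main() {
    // Illustrative: imports occupy the first indices of the function index
    // space, so take(num_imported_funcs) selects exactly the imported ones.
    let function_index_space = ["import_a", "import_b", "local_0", "local_1"];
    let num_imported_funcs = 2;
    let imports: Vec<_> = function_index_space
        .iter()
        .take(num_imported_funcs)
        .collect();
    assert_eq!(imports, [&"import_a", &"import_b"]);
}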


@@ -58,7 +58,7 @@ pub use crate::compiler::CraneliftCompiler;
pub use crate::config::CraneliftConfig;
pub use crate::debug::{FrameLayout, FrameLayoutChange, FrameLayouts};
pub use crate::debug::{ModuleMemoryOffset, ModuleVmctxInfo, ValueLabelsRanges};
pub use crate::trampoline::make_wasm_trampoline;
pub use crate::trampoline::make_trampoline_function_call;
/// Version number of this crate.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");


@@ -0,0 +1,146 @@
//! A trampoline generator for calling dynamic host functions from Wasm.
use super::binemit::TrampolineRelocSink;
use crate::translator::{compiled_function_unwind_info, signature_to_cranelift_ir};
use cranelift_codegen::ir::{
types, ExternalName, Function, InstBuilder, MemFlags, StackSlotData, StackSlotKind,
};
use cranelift_codegen::isa::TargetIsa;
use cranelift_codegen::print_errors::pretty_error;
use cranelift_codegen::Context;
use cranelift_codegen::{binemit, ir};
use std::cmp;
use std::mem;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use wasm_common::entity::EntityRef;
use wasm_common::SignatureIndex;
use wasmer_compiler::{CompileError, FunctionBody};
use wasmer_runtime::{Module, VMOffsets};
/// Create a trampoline for calling a dynamic host function from Wasm.
pub fn make_trampoline_dynamic_function(
isa: &dyn TargetIsa,
module: &Module,
offsets: &VMOffsets,
fn_builder_ctx: &mut FunctionBuilderContext,
sig_index: &SignatureIndex,
) -> Result<FunctionBody, CompileError> {
let func_type = &module.signatures[*sig_index];
let pointer_type = isa.pointer_type();
let frontend_config = isa.frontend_config();
let signature = signature_to_cranelift_ir(func_type, &frontend_config);
let mut stub_sig = ir::Signature::new(frontend_config.default_call_conv);
// Add the caller `vmctx` parameter.
stub_sig.params.push(ir::AbiParam::special(
pointer_type,
ir::ArgumentPurpose::VMContext,
));
// Add the caller/callee `vmctx` parameter.
stub_sig.params.push(ir::AbiParam::new(pointer_type));
// Add the `sig_index` parameter.
stub_sig.params.push(ir::AbiParam::new(types::I32));
// Add the `values_vec` parameter.
stub_sig.params.push(ir::AbiParam::new(pointer_type));
// Compute the size of the values vector. The vmctx and caller vmctx are passed separately.
let value_size = mem::size_of::<u128>();
let values_vec_len =
(value_size * cmp::max(signature.params.len() - 2, signature.returns.len())) as u32;
let mut context = Context::new();
context.func = Function::with_name_signature(ExternalName::user(0, 0), signature.clone());
let ss = context.func.create_stack_slot(StackSlotData::new(
StackSlotKind::ExplicitSlot,
values_vec_len,
));
{
let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
let block0 = builder.create_block();
builder.append_block_params_for_function_params(block0);
builder.switch_to_block(block0);
builder.seal_block(block0);
let values_vec_ptr_val = builder.ins().stack_addr(pointer_type, ss, 0);
let mflags = MemFlags::trusted();
// We only get the non-vmctx arguments
for i in 2..signature.params.len() {
let val = builder.func.dfg.block_params(block0)[i];
builder.ins().store(
mflags,
val,
values_vec_ptr_val,
((i - 2) * value_size) as i32,
);
}
let block_params = builder.func.dfg.block_params(block0);
let vmctx_ptr_val = block_params[0];
let caller_vmctx_ptr_val = block_params[1];
// Get the signature index
let caller_sig_id = builder.ins().iconst(types::I32, sig_index.index() as i64);
let callee_args = vec![
vmctx_ptr_val,
caller_vmctx_ptr_val,
caller_sig_id,
values_vec_ptr_val,
];
let new_sig = builder.import_signature(stub_sig);
let mem_flags = ir::MemFlags::trusted();
let callee_value = builder.ins().load(
pointer_type,
mem_flags,
vmctx_ptr_val,
offsets.vmdynamicfunction_import_context_address() as i32,
);
builder
.ins()
.call_indirect(new_sig, callee_value, &callee_args);
let mflags = MemFlags::trusted();
let mut results = Vec::new();
for (i, r) in signature.returns.iter().enumerate() {
let load = builder.ins().load(
r.value_type,
mflags,
values_vec_ptr_val,
(i * value_size) as i32,
);
results.push(load);
}
builder.ins().return_(&results);
builder.finalize()
}
let mut code_buf = Vec::new();
let mut reloc_sink = TrampolineRelocSink {};
let mut trap_sink = binemit::NullTrapSink {};
let mut stackmap_sink = binemit::NullStackmapSink {};
context
.compile_and_emit(
isa,
&mut code_buf,
&mut reloc_sink,
&mut trap_sink,
&mut stackmap_sink,
)
.map_err(|error| CompileError::Codegen(pretty_error(&context.func, Some(isa), error)))?;
let unwind_info = compiled_function_unwind_info(isa, &context);
Ok(FunctionBody {
body: code_buf,
unwind_info,
})
}
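
To make the stack-slot sizing above concrete, a worked example (hedged, standalone): for a Wasm signature (i32, i32) -> i64, the Cranelift signature carries two extra vmctx parameters, so the slot must hold max(4 - 2, 1) values of 16 bytes each:

use std::cmp;
use std::mem;

fn main() {
    // Worked example of the values_vec sizing in the trampoline above.
    let value_size = mem::size_of::<u128>(); // 16 bytes per value slot
    let params = 4; // (i32, i32) plus the vmctx and caller-vmctx parameters
    let returns = 1; // a single i64 result
    let values_vec_len = (value_size * cmp::max(params - 2, returns)) as u32;
    assert_eq!(values_vec_len, 32); // two 16-byte slots shared by args and results
}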


@@ -15,15 +15,15 @@ use cranelift_codegen::print_errors::pretty_error;
use cranelift_codegen::Context;
use cranelift_codegen::{binemit, ir};
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};
use std::mem;
use wasm_common::FunctionType;
use wasmer_compiler::{CompileError, FunctionBody};
/// Create a trampoline for invoking a WebAssembly function.
pub fn make_wasm_trampoline(
pub fn make_trampoline_function_call(
isa: &dyn TargetIsa,
fn_builder_ctx: &mut FunctionBuilderContext,
func_type: &FunctionType,
value_size: usize,
) -> Result<FunctionBody, CompileError> {
let pointer_type = isa.pointer_type();
let frontend_config = isa.frontend_config();
@@ -49,6 +49,7 @@ pub fn make_wasm_trampoline(
context.func = ir::Function::with_name_signature(ir::ExternalName::user(0, 0), wrapper_sig);
context.func.collect_frame_layout_info();
let value_size = mem::size_of::<u128>();
{
let mut builder = FunctionBuilder::new(&mut context.func, fn_builder_ctx);
let block0 = builder.create_block();


@@ -1,10 +1,10 @@
#![allow(missing_docs)]
// mod host;
mod wasm;
mod dynamic_function;
mod function_call;
// pub use host::make_host_trampoline;
pub use self::wasm::make_wasm_trampoline;
pub use self::dynamic_function::make_trampoline_dynamic_function;
pub use self::function_call::make_trampoline_function_call;
// TODO: Delete
pub mod ir {


@@ -82,6 +82,7 @@ impl Compiler for LLVMCompiler {
.cloned()
.unwrap_or_else(|| format!("fn{}", func_index.index()));
}
let mut module_custom_sections = PrimaryMap::new();
let mut functions = function_body_inputs
.into_iter()
.collect::<Vec<(LocalFunctionIndex, &FunctionBodyData<'_>)>>()
@@ -101,46 +102,41 @@ impl Compiler for LLVMCompiler {
})
.collect::<Result<Vec<_>, CompileError>>()?
.into_iter()
.map(|(mut function, local_relocations, custom_sections)| {
/// We collect the sections data
for (local_idx, custom_section) in custom_sections.iter().enumerate() {
let local_idx = local_idx as u32;
// TODO: these section numbers are potentially wrong, if there's
// no Read and only a ReadExecute then ReadExecute is 0.
let (ref mut section, section_num) = match &custom_section.protection {
CustomSectionProtection::Read => {
(&mut readonly_section, SectionIndex::from_u32(0))
.map(|(mut compiled_function, mut function_custom_sections)| {
let first_section = module_custom_sections.len() as u32;
for (_, custom_section) in function_custom_sections.iter() {
// TODO: remove this call to clone()
let mut custom_section = custom_section.clone();
for mut reloc in &mut custom_section.relocations {
match reloc.reloc_target {
RelocationTarget::CustomSection(index) => {
reloc.reloc_target = RelocationTarget::CustomSection(
SectionIndex::from_u32(first_section + index.as_u32()),
)
}
};
let offset = section.bytes.len() as i64;
section.bytes.append(&custom_section.bytes);
// TODO: we're needlessly rescanning the whole list.
for local_relocation in &local_relocations {
if local_relocation.local_section_index == local_idx {
used_readonly_section = true;
function.relocations.push(Relocation {
kind: local_relocation.kind,
reloc_target: RelocationTarget::CustomSection(section_num),
offset: local_relocation.offset,
addend: local_relocation.addend + offset,
});
_ => {}
}
}
module_custom_sections.push(custom_section);
}
Ok(function)
for mut reloc in &mut compiled_function.relocations {
match reloc.reloc_target {
RelocationTarget::CustomSection(index) => {
reloc.reloc_target = RelocationTarget::CustomSection(
SectionIndex::from_u32(first_section + index.as_u32()),
)
}
_ => {}
}
}
compiled_function
})
.collect::<Result<Vec<_>, CompileError>>()?
.into_iter()
.collect::<PrimaryMap<LocalFunctionIndex, _>>();
let mut custom_sections = PrimaryMap::new();
if used_readonly_section {
custom_sections.push(readonly_section);
}
Ok(Compilation::new(functions, custom_sections))
Ok(Compilation::new(functions, module_custom_sections))
}
fn compile_wasm_trampolines(
fn compile_function_call_trampolines(
&self,
signatures: &[FunctionType],
) -> Result<Vec<FunctionBody>, CompileError> {
@@ -151,4 +147,12 @@ impl Compiler for LLVMCompiler {
})
.collect::<Result<Vec<_>, CompileError>>()
}
fn compile_dynamic_function_trampolines(
&self,
module: &Module,
) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError> {
Ok(PrimaryMap::new())
// unimplemented!("Dynamic funciton trampolines not yet implemented");
}
}


@@ -50,10 +50,22 @@ impl LLVMConfig {
// Override the default multi-value switch
features.multi_value = false;
let operating_system =
if target.triple().operating_system == wasmer_compiler::OperatingSystem::Darwin {
// LLVM detects static relocation + darwin + 64-bit and
// force-enables PIC because MachO doesn't support that
// combination. They don't check whether they're targeting
// MachO, they check whether the OS is set to Darwin.
//
// Since both linux and darwin use SysV ABI, this should work.
wasmer_compiler::OperatingSystem::Linux
} else {
target.triple().operating_system
};
let triple = Triple {
architecture: target.triple().architecture,
vendor: target.triple().vendor.clone(),
operating_system: target.triple().operating_system,
operating_system,
environment: target.triple().environment,
binary_format: target_lexicon::BinaryFormat::Elf,
};


@@ -29,10 +29,12 @@ use inkwell::{
};
use smallvec::SmallVec;
use std::any::Any;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::num::TryFromIntError;
use crate::config::LLVMConfig;
use wasm_common::entity::{EntityRef, PrimaryMap, SecondaryMap};
use wasm_common::entity::{PrimaryMap, SecondaryMap};
use wasm_common::{
FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, MemoryType,
Mutability, SignatureIndex, TableIndex, Type,
@@ -40,9 +42,9 @@ use wasm_common::{
use wasmer_compiler::wasmparser::{self, BinaryReader, MemoryImmediate, Operator};
use wasmer_compiler::{
to_wasm_error, wasm_unsupported, Addend, CodeOffset, CompileError, CompiledFunction,
CompiledFunctionFrameInfo, CustomSection, CustomSectionProtection, FunctionAddressMap,
FunctionBody, FunctionBodyData, InstructionAddressMap, Relocation, RelocationKind,
RelocationTarget, SectionBody, SourceLoc, WasmResult,
CompiledFunctionFrameInfo, CustomSection, CustomSectionProtection, CustomSections,
FunctionAddressMap, FunctionBody, FunctionBodyData, InstructionAddressMap, Relocation,
RelocationKind, RelocationTarget, SectionBody, SectionIndex, SourceLoc, WasmResult,
};
use wasmer_runtime::libcalls::LibCall;
use wasmer_runtime::Module as WasmerCompilerModule;
@@ -52,6 +54,30 @@ use wasmer_runtime::{MemoryPlan, MemoryStyle, TablePlan, VMBuiltinFunctionIndex,
use std::fs;
use std::io::Write;
use wasm_common::entity::entity_impl;
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub struct ElfSectionIndex(u32);
entity_impl!(ElfSectionIndex);
impl ElfSectionIndex {
pub fn is_undef(&self) -> bool {
self.as_u32() == goblin::elf::section_header::SHN_UNDEF
}
pub fn from_usize(value: usize) -> Result<Self, CompileError> {
match u32::try_from(value) {
Err(_) => Err(CompileError::Codegen(format!(
"elf section index {} does not fit in 32 bits",
value
))),
Ok(value) => Ok(ElfSectionIndex::from_u32(value)),
}
}
pub fn as_usize(&self) -> usize {
self.as_u32() as usize
}
}
// TODO
fn wptype_to_type(ty: wasmparser::Type) -> WasmResult<Type> {
match ty {
@@ -84,15 +110,6 @@ fn const_zero<'ctx>(ty: BasicTypeEnum<'ctx>) -> BasicValueEnum<'ctx> {
}
}
// Relocation against a per-function section.
#[derive(Debug)]
pub struct LocalRelocation {
pub kind: RelocationKind,
pub local_section_index: u32,
pub offset: CodeOffset,
pub addend: Addend,
}
impl FuncTranslator {
pub fn new() -> Self {
Self {
@@ -109,7 +126,7 @@ impl FuncTranslator {
memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
table_plans: &PrimaryMap<TableIndex, TablePlan>,
func_names: &SecondaryMap<FunctionIndex, String>,
) -> Result<(CompiledFunction, Vec<LocalRelocation>, Vec<CustomSection>), CompileError> {
) -> Result<(CompiledFunction, CustomSections), CompileError> {
let func_index = wasm_module.func_index(*local_func_index);
let func_name = &func_names[func_index];
let module_name = match wasm_module.name.as_ref() {
@@ -135,7 +152,7 @@ impl FuncTranslator {
// TODO: figure out how many bytes long vmctx is, and mark it dereferenceable. (no need to mark it nonnull once we do this.)
// TODO: mark vmctx nofree
func.set_personality_function(intrinsics.personality);
func.as_global_value().set_section("wasmer_function");
func.as_global_value().set_section(".wasmer_function");
let entry = self.ctx.append_basic_block(func, "entry");
let start_of_code = self.ctx.append_basic_block(func, "start_of_code");
@@ -165,7 +182,9 @@ impl FuncTranslator {
for idx in 0..wasm_fn_type.params().len() {
let ty = wasm_fn_type.params()[idx];
let ty = type_to_llvm(&intrinsics, ty);
let value = func.get_nth_param((idx + 2) as u32).unwrap();
let value = func
.get_nth_param((idx as u32).checked_add(2).unwrap())
.unwrap();
// TODO: don't interleave allocas and stores.
let alloca = cache_builder.build_alloca(ty, "param");
cache_builder.build_store(alloca, value);
@@ -335,74 +354,118 @@ impl FuncTranslator {
Some(name.unwrap())
};
let wasmer_function_idx = elf
// Build up a mapping from a section to its relocation sections.
let reloc_sections = elf.shdr_relocs.iter().fold(
HashMap::new(),
|mut map: HashMap<_, Vec<_>>, (section_index, reloc_section)| {
let target_section = elf.section_headers[*section_index].sh_info as usize;
let target_section = ElfSectionIndex::from_usize(target_section).unwrap();
map.entry(target_section).or_default().push(reloc_section);
map
},
);
let mut visited: HashSet<ElfSectionIndex> = HashSet::new();
let mut worklist: Vec<ElfSectionIndex> = Vec::new();
let mut section_targets: HashMap<ElfSectionIndex, RelocationTarget> = HashMap::new();
let wasmer_function_index = elf
.section_headers
.iter()
.enumerate()
.filter(|(_, section)| get_section_name(section) == Some("wasmer_function"))
.map(|(idx, _)| idx)
.take(1)
.filter(|(_, section)| get_section_name(section) == Some(".wasmer_function"))
.map(|(index, _)| index)
.collect::<Vec<_>>();
// TODO: handle errors here instead of asserting.
assert!(wasmer_function_idx.len() == 1);
let wasmer_function_idx = wasmer_function_idx[0];
let bytes = elf.section_headers[wasmer_function_idx].file_range();
let bytes = mem_buf_slice[bytes.start..bytes.end].to_vec();
let mut relocations = vec![];
let mut local_relocations = vec![];
let mut required_custom_sections = HashMap::new();
for (section_index, reloc_section) in &elf.shdr_relocs {
let section_name = get_section_name(&elf.section_headers[*section_index]);
if section_name == Some(".rel.rodata") || section_name == Some(".rela.rodata") {
return Err(CompileError::Codegen(
"jump tables not yet implemented".to_string(),
));
if wasmer_function_index.len() != 1 {
return Err(CompileError::Codegen(format!(
"found {} sections named .wasmer_function",
wasmer_function_index.len()
)));
}
if section_name != Some(".relawasmer_function")
&& section_name != Some(".relwasmer_function")
let wasmer_function_index = wasmer_function_index[0];
let wasmer_function_index = ElfSectionIndex::from_usize(wasmer_function_index)?;
let mut section_to_custom_section = HashMap::new();
section_targets.insert(
wasmer_function_index,
RelocationTarget::LocalFunc(*local_func_index),
);
let mut next_custom_section: u32 = 0;
let mut elf_section_to_target = |elf_section_index: ElfSectionIndex| {
*section_targets.entry(elf_section_index).or_insert_with(|| {
let next = SectionIndex::from_u32(next_custom_section);
section_to_custom_section.insert(elf_section_index, next);
let target = RelocationTarget::CustomSection(next);
next_custom_section += 1;
target
})
};
let section_bytes = |elf_section_index: ElfSectionIndex| {
let elf_section_index = elf_section_index.as_usize();
let byte_range = elf.section_headers[elf_section_index].file_range();
mem_buf_slice[byte_range.start..byte_range.end].to_vec()
};
// From elf section index to list of Relocations. Although we use a Vec,
// the order of relocations is not important.
let mut relocations: HashMap<ElfSectionIndex, Vec<Relocation>> = HashMap::new();
// Each iteration of this loop pulls a section and the relocations
// that apply to it. We begin with the ".wasmer_function"
// section, and then parse all relocation sections that apply to that
// section. Those relocations may refer to additional sections which we
// then add to the worklist until we've visited the closure of
// everything needed to run the code in ".wasmer_function".
//
// `worklist` is the list of sections we have yet to visit. It never
// contains any duplicates or sections we've already visited. `visited`
// contains all the sections we've ever added to the worklist in a set
// so that we can quickly check whether a section is new before adding
// it to worklist. `section_to_custom_section` is filled in with all
// the sections we want to include.
worklist.push(wasmer_function_index);
visited.insert(wasmer_function_index);
while let Some(section_index) = worklist.pop() {
for reloc in reloc_sections
.get(&section_index)
.iter()
.flat_map(|inner| inner.iter().flat_map(|inner2| inner2.iter()))
{
continue;
}
for reloc in reloc_section.iter() {
let kind = match reloc.r_type {
// TODO: these constants are not per-arch, we'll need to
// make the whole match per-arch.
goblin::elf::reloc::R_X86_64_64 => RelocationKind::Abs8,
_ => unimplemented!("unknown relocation {}", reloc.r_type),
_ => {
return Err(CompileError::Codegen(format!(
"unknown ELF relocation {}",
reloc.r_type
)));
}
};
let offset = reloc.r_offset as u32;
let addend = reloc.r_addend.unwrap_or(0);
let target = reloc.r_sym;
// TODO: error handling
let target = elf.syms.get(target).unwrap();
if target.st_type() == goblin::elf::sym::STT_SECTION {
let len = required_custom_sections.len();
let entry = required_custom_sections.entry(target.st_shndx);
let local_section_index = *entry.or_insert(len) as _;
local_relocations.push(LocalRelocation {
kind,
local_section_index,
offset,
addend,
});
} else if target.st_type() == goblin::elf::sym::STT_FUNC
&& target.st_shndx == wasmer_function_idx
let elf_target = elf.syms.get(target).unwrap();
let elf_target_section = ElfSectionIndex::from_usize(elf_target.st_shndx)?;
let reloc_target = if elf_target.st_type() == goblin::elf::sym::STT_SECTION {
if visited.insert(elf_target_section) {
worklist.push(elf_target_section);
}
elf_section_to_target(elf_target_section)
} else if elf_target.st_type() == goblin::elf::sym::STT_FUNC
&& elf_target_section == wasmer_function_index
{
// This is a function referencing its own byte stream.
relocations.push(Relocation {
kind,
reloc_target: RelocationTarget::LocalFunc(*local_func_index),
offset,
addend,
});
} else if target.st_type() == goblin::elf::sym::STT_NOTYPE
&& target.st_shndx == goblin::elf::section_header::SHN_UNDEF as _
RelocationTarget::LocalFunc(*local_func_index)
} else if elf_target.st_type() == goblin::elf::sym::STT_NOTYPE
&& elf_target_section.is_undef()
{
// Not defined in this .o file. Maybe another local function?
let name = target.st_name;
let name = elf_target.st_name;
let name = elf.strtab.get(name).unwrap().unwrap();
if let Some((index, _)) =
func_names.iter().find(|(_, func_name)| *func_name == name)
@@ -410,70 +473,78 @@ impl FuncTranslator {
let local_index = wasm_module
.local_func_index(index)
.expect("Relocation to non-local function");
relocations.push(Relocation {
kind,
reloc_target: RelocationTarget::LocalFunc(local_index),
offset,
addend,
});
RelocationTarget::LocalFunc(local_index)
// Maybe a libcall then?
} else if let Some(libcall) = libcalls.get(name) {
relocations.push(Relocation {
kind,
reloc_target: RelocationTarget::LibCall(*libcall),
offset,
addend,
});
RelocationTarget::LibCall(*libcall)
} else {
unimplemented!("reference to unknown symbol {}", name);
}
} else {
unimplemented!("unknown relocation {:?} with target {:?}", reloc, target);
}
};
relocations
.entry(section_index)
.or_default()
.push(Relocation {
kind,
reloc_target,
offset,
addend,
});
}
}
let mut custom_sections = vec![];
custom_sections.resize(
required_custom_sections.len(),
let mut custom_sections = section_to_custom_section
.iter()
.map(|(elf_section_index, custom_section_index)| {
(
custom_section_index,
CustomSection {
protection: CustomSectionProtection::Read,
bytes: SectionBody::default(),
relocations: vec![],
bytes: SectionBody::new_with_vec(section_bytes(*elf_section_index)),
relocations: relocations
.remove_entry(elf_section_index)
.map_or(vec![], |(_, v)| v),
},
);
for (section_idx, local_section_idx) in required_custom_sections {
let bytes = elf.section_headers[section_idx as usize].file_range();
let bytes = &mem_buf_slice[bytes.start..bytes.end];
custom_sections[local_section_idx].bytes.extend(bytes);
}
)
})
.collect::<Vec<_>>();
custom_sections.sort_unstable_by_key(|a| a.0);
let custom_sections = custom_sections
.into_iter()
.map(|(_, v)| v)
.collect::<PrimaryMap<SectionIndex, _>>();
let function_body = FunctionBody {
body: section_bytes(wasmer_function_index),
unwind_info: None,
};
let address_map = FunctionAddressMap {
instructions: vec![InstructionAddressMap {
srcloc: SourceLoc::default(),
code_offset: 0,
code_len: bytes.len(),
code_len: function_body.body.len(),
}],
start_srcloc: SourceLoc::default(),
end_srcloc: SourceLoc::default(),
body_offset: 0,
body_len: bytes.len(),
body_len: function_body.body.len(),
};
Ok((
CompiledFunction {
body: FunctionBody {
body: bytes,
unwind_info: None,
},
body: function_body,
jt_offsets: SecondaryMap::new(),
relocations,
relocations: relocations
.remove_entry(&wasmer_function_index)
.map_or(vec![], |(_, v)| v),
frame_info: CompiledFunctionFrameInfo {
address_map,
traps: vec![],
},
},
local_relocations,
custom_sections,
))
}
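
The section-gathering logic above is a plain worklist traversal of the relocation graph; a self-contained sketch of the same algorithm, with bare integers standing in for ELF section indices:

use std::collections::{HashMap, HashSet};

// Standalone sketch: starting from one section, visit the closure of every
// section reachable through relocations, mirroring the loop above.
fn reachable_sections(start: u32, relocs: &HashMap<u32, Vec<u32>>) -> HashSet<u32> {
    let mut visited = HashSet::new();
    let mut worklist = vec![start];
    visited.insert(start);
    while let Some(section) = worklist.pop() {
        for &target in relocs.get(&section).into_iter().flatten() {
            // Only enqueue sections we have never seen before.
            if visited.insert(target) {
                worklist.push(target);
            }
        }
    }
    visited
}

fn main() {
    let mut relocs = HashMap::new();
    relocs.insert(0, vec![1, 2]); // .wasmer_function refers to sections 1 and 2
    relocs.insert(2, vec![3]); // section 2 refers to section 3
    assert_eq!(reachable_sections(0, &relocs).len(), 4);
}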
@@ -2156,96 +2227,34 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
Operator::GlobalGet { global_index } => {
let global_index = GlobalIndex::from_u32(global_index);
let global_type = module.globals[global_index];
let global_value_type = global_type.ty;
// TODO: cache loads of const globals.
let _global_mutability = global_type.mutability;
let global_ptr =
if let Some(local_global_index) = module.local_global_index(global_index) {
let offset = self.vmoffsets.vmctx_vmglobal_definition(local_global_index);
let offset = intrinsics.i32_ty.const_int(offset.into(), false);
unsafe { builder.build_gep(*vmctx, &[offset], "") }
} else {
let offset = self.vmoffsets.vmctx_vmglobal_import(global_index);
let offset = intrinsics.i32_ty.const_int(offset.into(), false);
let global_ptr_ptr = unsafe { builder.build_gep(*vmctx, &[offset], "") };
let global_ptr_ptr = builder
.build_bitcast(global_ptr_ptr, intrinsics.i8_ptr_ty, "")
.into_pointer_value();
let global_ptr = builder.build_load(global_ptr_ptr, "");
builder
.build_bitcast(global_ptr, intrinsics.i8_ptr_ty, "")
.into_pointer_value()
};
let global_ptr = builder
.build_bitcast(
global_ptr,
type_to_llvm_ptr(&intrinsics, global_value_type),
"",
)
.into_pointer_value();
let value = builder.build_load(global_ptr, "");
// TODO: add TBAA info.
match ctx.global(global_index, intrinsics) {
GlobalCache::Const { value } => {
self.state.push1(value);
}
GlobalCache::Mut { ptr_to_value } => {
let value = builder.build_load(ptr_to_value, "");
// TODO: tbaa
self.state.push1(value);
}
}
}
Operator::GlobalSet { global_index } => {
let global_index = GlobalIndex::from_u32(global_index);
let global_type = module.globals[global_index];
let global_value_type = global_type.ty;
// Note that we don't check mutability, assuming that's already
// been checked by some other verifier.
let global_ptr =
if let Some(local_global_index) = module.local_global_index(global_index) {
let offset = self.vmoffsets.vmctx_vmglobal_definition(local_global_index);
let offset = intrinsics.i32_ty.const_int(offset.into(), false);
unsafe { builder.build_gep(*vmctx, &[offset], "") }
} else {
let offset = self.vmoffsets.vmctx_vmglobal_import(global_index);
let offset = intrinsics.i32_ty.const_int(offset.into(), false);
let global_ptr_ptr = unsafe { builder.build_gep(*vmctx, &[offset], "") };
let global_ptr_ptr = builder
.build_bitcast(global_ptr_ptr, intrinsics.i8_ptr_ty, "")
.into_pointer_value();
builder.build_load(global_ptr_ptr, "").into_pointer_value()
};
let global_ptr = builder
.build_bitcast(
global_ptr,
type_to_llvm_ptr(&intrinsics, global_value_type),
"",
)
.into_pointer_value();
let (value, info) = self.state.pop1_extra()?;
let value = apply_pending_canonicalization(builder, intrinsics, value, info);
builder.build_store(global_ptr, value);
// TODO: add TBAA info
/*
let (value, info) = self.state.pop1_extra()?;
let value = apply_pending_canonicalization(builder, intrinsics, value, info);
let index = GlobalIndex::from_u32(global_index);
let global_cache = ctx.global_cache(index, intrinsics, self.module);
match global_cache {
match ctx.global(global_index, intrinsics) {
GlobalCache::Const { value } => {
return Err(CompileError::Codegen(format!(
"global.set on immutable global index {}",
global_index.as_u32()
)))
}
GlobalCache::Mut { ptr_to_value } => {
let store = builder.build_store(ptr_to_value, value);
tbaa_label(
&self.module,
intrinsics,
"global",
store,
Some(global_index),
);
}
GlobalCache::Const { value: _ } => {
return Err(CompileError::Codegen("global is immutable".to_string()));
let (value, info) = self.state.pop1_extra()?;
let value =
apply_pending_canonicalization(builder, intrinsics, value, info);
builder.build_store(ptr_to_value, value);
// TODO: tbaa
}
}
*/
}
Operator::Select => {


@@ -910,7 +910,7 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
})
}
pub fn table_prepare(
fn table_prepare(
&mut self,
table_index: TableIndex,
intrinsics: &Intrinsics<'ctx>,
@@ -952,6 +952,9 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
);
let ptr_to_bounds =
unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") };
let ptr_to_bounds = cache_builder
.build_bitcast(ptr_to_bounds, intrinsics.i32_ptr_ty, "")
.into_pointer_value();
(ptr_to_base_ptr, ptr_to_bounds)
} else {
let offset = intrinsics.i64_ty.const_int(
@@ -989,6 +992,9 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
.const_int(offsets.vmtable_definition_current_elements().into(), false);
let ptr_to_bounds =
unsafe { cache_builder.build_gep(definition_ptr, &[offset], "") };
let ptr_to_bounds = cache_builder
.build_bitcast(ptr_to_bounds, intrinsics.i32_ptr_ty, "")
.into_pointer_value();
(ptr_to_base_ptr, ptr_to_bounds)
};
TableCache {
@@ -1008,10 +1014,14 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
builder: &Builder<'ctx>,
) -> (PointerValue<'ctx>, IntValue<'ctx>) {
let (ptr_to_base_ptr, ptr_to_bounds) = self.table_prepare(index, intrinsics, module);
let base_ptr = builder
let base_ptr = self
.cache_builder
.build_load(ptr_to_base_ptr, "base_ptr")
.into_pointer_value();
let bounds = builder.build_load(ptr_to_bounds, "bounds").into_int_value();
let bounds = self
.cache_builder
.build_load(ptr_to_bounds, "bounds")
.into_int_value();
tbaa_label(
module,
intrinsics,
@@ -1078,106 +1088,59 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
})
}
pub fn global_cache(
pub fn global(
&mut self,
index: GlobalIndex,
intrinsics: &Intrinsics<'ctx>,
module: &Module<'ctx>,
) -> GlobalCache<'ctx> {
let (cached_globals, ctx_ptr_value, wasm_module, cache_builder, offsets) = (
let (cached_globals, wasm_module, ctx_ptr_value, cache_builder, offsets) = (
&mut self.cached_globals,
self.ctx_ptr_value,
self.wasm_module,
self.ctx_ptr_value,
&self.cache_builder,
&self.offsets,
);
*cached_globals.entry(index).or_insert_with(|| {
let (globals_array_ptr_ptr, index, mutable, wasmer_ty, field_name) = {
let desc = wasm_module.globals.get(index).unwrap();
if let Some(_local_global_index) = wasm_module.local_global_index(index) {
(
unsafe {
cache_builder
.build_struct_gep(
ctx_ptr_value,
offset_to_index(offsets.vmctx_globals_begin()),
"globals_array_ptr_ptr",
)
.unwrap()
},
index.index() as u64,
desc.mutability,
desc.ty,
"context_field_ptr_to_local_globals",
)
let global_type = wasm_module.globals[index];
let global_value_type = global_type.ty;
let global_mutability = global_type.mutability;
let global_ptr = if let Some(local_global_index) = wasm_module.local_global_index(index)
{
let offset = offsets.vmctx_vmglobal_definition(local_global_index);
let offset = intrinsics.i32_ty.const_int(offset.into(), false);
unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") }
} else {
(
unsafe {
cache_builder
.build_struct_gep(
ctx_ptr_value,
offset_to_index(offsets.vmctx_imported_globals_begin()),
"globals_array_ptr_ptr",
let offset = offsets.vmctx_vmglobal_import(index);
let offset = intrinsics.i32_ty.const_int(offset.into(), false);
let global_ptr_ptr =
unsafe { cache_builder.build_gep(ctx_ptr_value, &[offset], "") };
let global_ptr_ptr = cache_builder
.build_bitcast(
global_ptr_ptr,
intrinsics.i32_ptr_ty.ptr_type(AddressSpace::Generic),
"",
)
.unwrap()
},
index.index() as u64,
desc.mutability,
desc.ty,
"context_field_ptr_to_imported_globals",
)
}
};
let llvm_ptr_ty = type_to_llvm_ptr(intrinsics, wasmer_ty);
let global_array_ptr = cache_builder
.build_load(globals_array_ptr_ptr, "global_array_ptr")
.into_pointer_value();
tbaa_label(
module,
intrinsics,
field_name,
global_array_ptr.as_instruction_value().unwrap(),
None,
);
let const_index = intrinsics.i32_ty.const_int(index, false);
let global_ptr_ptr = unsafe {
cache_builder.build_in_bounds_gep(
global_array_ptr,
&[const_index],
"global_ptr_ptr",
)
cache_builder
.build_load(global_ptr_ptr, "")
.into_pointer_value()
};
let global_ptr = cache_builder
.build_load(global_ptr_ptr, "global_ptr")
.build_bitcast(
global_ptr,
type_to_llvm_ptr(&intrinsics, global_value_type),
"",
)
.into_pointer_value();
tbaa_label(
module,
intrinsics,
"global_ptr",
global_ptr.as_instruction_value().unwrap(),
Some(index as u32),
);
let global_ptr_typed =
cache_builder.build_pointer_cast(global_ptr, llvm_ptr_ty, "global_ptr_typed");
let mutable = mutable == Mutability::Var;
if mutable {
GlobalCache::Mut {
ptr_to_value: global_ptr_typed,
}
} else {
let value = cache_builder.build_load(global_ptr_typed, "global_value");
tbaa_label(
module,
intrinsics,
"global",
value.as_instruction_value().unwrap(),
Some(index as u32),
);
GlobalCache::Const { value }
match global_mutability {
Mutability::Const => GlobalCache::Const {
value: cache_builder.build_load(global_ptr, ""),
},
Mutability::Var => GlobalCache::Mut {
ptr_to_value: global_ptr,
},
}
})
}


@@ -8428,8 +8428,7 @@ pub fn gen_import_call_trampoline(
);
a.emit_host_redirection(GPR::RAX);
let mut section_body = SectionBody::default();
section_body.extend(&a.finalize().unwrap());
let section_body = SectionBody::new_with_vec(a.finalize().unwrap().to_vec());
CustomSection {
protection: CustomSectionProtection::ReadExecute,


@@ -118,7 +118,7 @@ impl Compiler for SinglepassCompiler {
Ok(Compilation::new(functions, import_trampolines))
}
fn compile_wasm_trampolines(
fn compile_function_call_trampolines(
&self,
signatures: &[FunctionType],
) -> Result<Vec<FunctionBody>, CompileError> {
@@ -128,6 +128,14 @@ impl Compiler for SinglepassCompiler {
.map(gen_std_trampoline)
.collect())
}
fn compile_dynamic_function_trampolines(
&self,
module: &Module,
) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError> {
Ok(PrimaryMap::new())
// unimplemented!("Dynamic funciton trampolines not yet implemented");
}
}
trait ToCompileError {


@@ -9,7 +9,9 @@ use crate::target::Target;
use crate::FunctionBodyData;
use crate::ModuleTranslationState;
use wasm_common::entity::PrimaryMap;
use wasm_common::{Features, FunctionType, LocalFunctionIndex, MemoryIndex, TableIndex};
use wasm_common::{
Features, FunctionIndex, FunctionType, LocalFunctionIndex, MemoryIndex, TableIndex,
};
use wasmer_runtime::Module;
use wasmer_runtime::{MemoryPlan, TablePlan};
use wasmparser::{validate, OperatorValidatorConfig, ValidatingParserConfig};
@@ -80,8 +82,30 @@ pub trait Compiler {
/// let func = instance.exports.func("my_func");
/// func.call(&[Value::I32(1)]);
/// ```
fn compile_wasm_trampolines(
fn compile_function_call_trampolines(
&self,
signatures: &[FunctionType],
) -> Result<Vec<FunctionBody>, CompileError>;
/// Compile the trampolines used to call a dynamic function defined in
/// the host, from a Wasm module.
///
/// This allows us to create dynamic Wasm functions, such as:
///
/// ```ignore
/// fn my_func(values: Vec<Val>) -> Vec<Val> {
/// // do something
/// }
///
/// let my_func_type = FuncType::new(vec![Type::I32], vec![Type::I32]);
/// let imports = imports!{
/// "namespace" => {
/// "my_func" => Func::new_dynamic(my_func_type, my_func),s
/// }
/// }
/// ```
fn compile_dynamic_function_trampolines(
&self,
module: &Module,
) -> Result<PrimaryMap<FunctionIndex, FunctionBody>, CompileError>;
}


@@ -78,6 +78,7 @@ pub use crate::unwind::{CompiledFunctionUnwindInfo, FDERelocEntry, FunctionTable
pub use wasm_common::Features;
#[cfg(feature = "translator")]
/// wasmparser is exported as a module to slim compiler dependencies
pub mod wasmparser {
pub use wasmparser::*;


@@ -25,7 +25,6 @@ pub enum CustomSectionProtection {
// We don't include `ReadWrite` here because it would complicate freeze
// and resumption of executing Modules.
/// A custom section with read and execute permissions.
ReadExecute,
}
@@ -56,14 +55,9 @@ pub struct CustomSection {
pub struct SectionBody(#[serde(with = "serde_bytes")] Vec<u8>);
impl SectionBody {
/// Extend the section with the bytes given.
pub fn extend(&mut self, contents: &[u8]) {
self.0.extend(contents);
}
/// Extends the section by appending bytes from another section.
pub fn append(&mut self, body: &Self) {
self.0.extend(&body.0);
/// Create a new section body with the given contents.
pub fn new_with_vec(contents: Vec<u8>) -> Self {
Self(contents)
}
/// Returns a raw pointer to the section's buffer.
@@ -83,6 +77,6 @@ impl SectionBody {
/// Returns whether or not the section body is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
self.0.is_empty()
}
}
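
A hedged usage sketch of the new constructor, which replaces the removed incremental `extend`/`append` API with a single up-front move of the finalized bytes:

// Illustrative: build a section body in one shot from finalized bytes.
let body = SectionBody::new_with_vec(vec![0x90, 0x90, 0xc3]);
assert_eq!(body.len(), 3);
assert!(!body.is_empty());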


@@ -5,20 +5,19 @@ pub use target_lexicon::{Architecture, CallingConvention, OperatingSystem, Tripl
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use raw_cpuid::CpuId;
/// The nomenclature is inspired by the [raw-cpuid crate].
/// The nomenclature is inspired by the [`cpuid` crate].
/// The list of supported features was initially retrieved from
/// [cranelift-native].
/// [`cranelift-native`].
///
/// The `CpuFeature` enum vaues are likely to grow closer to the
/// original cpuid. However, we prefer to start small and grow from there.
/// The `CpuFeature` enum values are likely to grow closer to the
/// original `cpuid`. However, we prefer to start small and grow from there.
///
/// If you would like to use a flag that doesn't exist yet here, please
/// open a PR.
///
/// [cpuid crate]: https://docs.rs/cpuid/0.1.1/cpuid/enum.CpuFeature.html
/// [cranelift-native]: https://github.com/bytecodealliance/cranelift/blob/6988545fd20249b084c53f4761b8c861266f5d31/cranelift-native/src/lib.rs#L51-L92
#[allow(missing_docs)]
#[allow(clippy::derive_hash_xor_eq)]
/// [`cpuid` crate]: https://docs.rs/cpuid/0.1.1/cpuid/enum.CpuFeature.html
/// [`cranelift-native`]: https://github.com/bytecodealliance/cranelift/blob/6988545fd20249b084c53f4761b8c861266f5d31/cranelift-native/src/lib.rs#L51-L92
#[allow(missing_docs, clippy::derive_hash_xor_eq)]
#[derive(EnumSetType, Debug, Hash)]
pub enum CpuFeature {
// X86 features


@@ -4,10 +4,9 @@ use crate::{CodeMemory, CompiledModule};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use wasm_common::entity::PrimaryMap;
use wasm_common::{FunctionType, LocalFunctionIndex, MemoryIndex, SignatureIndex, TableIndex};
use wasm_common::{FunctionIndex, FunctionType, LocalFunctionIndex, SignatureIndex};
use wasmer_compiler::{
Compilation, CompileError, CustomSection, CustomSectionProtection, FunctionBody, SectionIndex,
Target,
CompileError, CustomSection, CustomSectionProtection, FunctionBody, SectionIndex, Target,
};
#[cfg(feature = "compiler")]
use wasmer_compiler::{Compiler, CompilerConfig};
@@ -42,7 +41,7 @@ impl JITEngine {
Self {
inner: Arc::new(Mutex::new(JITEngineInner {
compiler: Some(compiler),
trampolines: HashMap::new(),
function_call_trampolines: HashMap::new(),
code_memory: CodeMemory::new(),
signatures: SignatureRegistry::new(),
})),
@@ -68,7 +67,7 @@ impl JITEngine {
inner: Arc::new(Mutex::new(JITEngineInner {
#[cfg(feature = "compiler")]
compiler: None,
trampolines: HashMap::new(),
function_call_trampolines: HashMap::new(),
code_memory: CodeMemory::new(),
signatures: SignatureRegistry::new(),
})),
@@ -110,8 +109,8 @@ impl Engine for JITEngine {
}
/// Retrieves a trampoline given a signature
fn trampoline(&self, sig: VMSharedSignatureIndex) -> Option<VMTrampoline> {
self.compiler().trampoline(sig)
fn function_call_trampoline(&self, sig: VMSharedSignatureIndex) -> Option<VMTrampoline> {
self.compiler().function_call_trampoline(sig)
}
/// Validates a WebAssembly module
@@ -176,7 +175,7 @@ pub struct JITEngineInner {
#[cfg(feature = "compiler")]
compiler: Option<Box<dyn Compiler + Send>>,
/// Pointers to trampoline functions used to enter particular signatures
trampolines: HashMap<VMSharedSignatureIndex, VMTrampoline>,
function_call_trampolines: HashMap<VMSharedSignatureIndex, VMTrampoline>,
/// The code memory is responsible for publishing the compiled
/// functions to memory.
code_memory: CodeMemory,
@@ -237,8 +236,15 @@ impl JITEngineInner {
&mut self,
module: &Module,
functions: &PrimaryMap<LocalFunctionIndex, FunctionBody>,
trampolines: &PrimaryMap<SignatureIndex, FunctionBody>,
) -> Result<PrimaryMap<LocalFunctionIndex, *mut [VMFunctionBody]>, CompileError> {
function_call_trampolines: &PrimaryMap<SignatureIndex, FunctionBody>,
dynamic_function_trampolines: &PrimaryMap<FunctionIndex, FunctionBody>,
) -> Result<
(
PrimaryMap<LocalFunctionIndex, *mut [VMFunctionBody]>,
PrimaryMap<FunctionIndex, *const VMFunctionBody>,
),
CompileError,
> {
// Allocate all of the compiled functions into executable memory,
// copying over their contents.
let allocated_functions =
@@ -251,10 +257,10 @@ impl JITEngineInner {
))
})?;
for (sig_index, compiled_function) in trampolines.iter() {
for (sig_index, compiled_function) in function_call_trampolines.iter() {
let func_type = module.signatures.get(sig_index).unwrap();
let index = self.signatures.register(&func_type);
if self.trampolines.contains_key(&index) {
if self.function_call_trampolines.contains_key(&index) {
// We don't need to allocate the trampoline in case
// its signature is already allocated.
continue;
@@ -264,16 +270,34 @@ impl JITEngineInner {
.allocate_for_function(&compiled_function)
.map_err(|message| {
CompileError::Resource(format!(
"failed to allocate memory for trampolines: {}",
"failed to allocate memory for function call trampolines: {}",
message
))
})?
.as_ptr();
let trampoline =
unsafe { std::mem::transmute::<*const VMFunctionBody, VMTrampoline>(ptr) };
self.trampolines.insert(index, trampoline);
self.function_call_trampolines.insert(index, trampoline);
}
Ok(allocated_functions)
let allocated_dynamic_function_trampolines = dynamic_function_trampolines
.values()
.map(|compiled_function| {
let ptr = self
.code_memory
.allocate_for_function(&compiled_function)
.map_err(|message| {
CompileError::Resource(format!(
"failed to allocate memory for dynamic function trampolines: {}",
message
))
})?
.as_ptr();
Ok(ptr)
})
.collect::<Result<PrimaryMap<FunctionIndex, _>, CompileError>>()?;
Ok((allocated_functions, allocated_dynamic_function_trampolines))
}
/// Make memory containing compiled code executable.
@@ -287,7 +311,7 @@ impl JITEngineInner {
}
/// Gets the trampoline pre-registered for a particular signature
pub fn trampoline(&self, sig: VMSharedSignatureIndex) -> Option<VMTrampoline> {
self.trampolines.get(&sig).cloned()
pub fn function_call_trampoline(&self, sig: VMSharedSignatureIndex) -> Option<VMTrampoline> {
self.function_call_trampolines.get(&sig).cloned()
}
}
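
The `transmute` used when registering a function call trampoline turns a raw pointer to executable bytes into a callable function pointer; a standalone sketch of the same cast (sound only because the pointed-to bytes really are code with the expected ABI):

use std::mem;

// Standalone sketch mirroring the *const VMFunctionBody -> VMTrampoline cast.
type Trampoline = unsafe extern "C" fn(i32) -> i32;

unsafe extern "C" fn add_one(x: i32) -> i32 {
    x + 1
}

fn main() {
    let raw = add_one as *const u8; // pretend this came from code memory
    let trampoline: Trampoline = unsafe { mem::transmute(raw) };
    assert_eq!(unsafe { trampoline(1) }, 2);
}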


@@ -8,8 +8,8 @@ use std::any::Any;
use std::sync::{Arc, Mutex};
use wasm_common::entity::{BoxedSlice, PrimaryMap};
use wasm_common::{
DataInitializer, LocalFunctionIndex, MemoryIndex, OwnedDataInitializer, SignatureIndex,
TableIndex,
DataInitializer, FunctionIndex, LocalFunctionIndex, MemoryIndex, OwnedDataInitializer,
SignatureIndex, TableIndex,
};
use wasmer_compiler::CompileError;
#[cfg(feature = "compiler")]
@@ -30,6 +30,7 @@ pub struct CompiledModule {
serializable: SerializableModule,
finished_functions: BoxedSlice<LocalFunctionIndex, *mut [VMFunctionBody]>,
finished_dynamic_function_trampolines: BoxedSlice<FunctionIndex, *const VMFunctionBody>,
signatures: BoxedSlice<SignatureIndex, VMSharedSignatureIndex>,
frame_info_registration: Mutex<Option<Option<GlobalFrameInfoRegistration>>>,
}
@@ -75,11 +76,14 @@ impl CompiledModule {
.values()
.cloned()
.collect::<Vec<_>>();
let trampolines = compiler
.compile_wasm_trampolines(&func_types)?
let function_call_trampolines = compiler
.compile_function_call_trampolines(&func_types)?
.into_iter()
.collect::<PrimaryMap<SignatureIndex, _>>();
let dynamic_function_trampolines =
compiler.compile_dynamic_function_trampolines(&translation.module)?;
let data_initializers = translation
.data_initializers
.iter()
@@ -98,7 +102,8 @@ impl CompiledModule {
function_relocations: compilation.get_relocations(),
function_jt_offsets: compilation.get_jt_offsets(),
function_frame_info: frame_infos,
trampolines,
function_call_trampolines,
dynamic_function_trampolines,
custom_sections: compilation.get_custom_sections(),
custom_section_relocations: compilation.get_custom_section_relocations(),
};
@@ -146,10 +151,11 @@ impl CompiledModule {
jit_compiler: &mut JITEngineInner,
serializable: SerializableModule,
) -> Result<Self, CompileError> {
let finished_functions = jit_compiler.allocate(
let (finished_functions, finished_dynamic_function_trampolines) = jit_compiler.allocate(
&serializable.module,
&serializable.compilation.function_bodies,
&serializable.compilation.trampolines,
&serializable.compilation.function_call_trampolines,
&serializable.compilation.dynamic_function_trampolines,
)?;
let custom_sections =
jit_compiler.allocate_custom_sections(&serializable.compilation.custom_sections)?;
@@ -180,6 +186,8 @@ impl CompiledModule {
Ok(Self {
serializable,
finished_functions: finished_functions.into_boxed_slice(),
finished_dynamic_function_trampolines: finished_dynamic_function_trampolines
.into_boxed_slice(),
signatures: signatures.into_boxed_slice(),
frame_info_registration: Mutex::new(None),
})
@@ -211,6 +219,7 @@ impl CompiledModule {
&self.module(),
&sig_registry,
resolver,
&self.finished_dynamic_function_trampolines,
self.memory_plans(),
self.table_plans(),
)


@@ -2,7 +2,8 @@ use serde::{Deserialize, Serialize};
use std::sync::Arc;
use wasm_common::entity::PrimaryMap;
use wasm_common::{
Features, LocalFunctionIndex, MemoryIndex, OwnedDataInitializer, SignatureIndex, TableIndex,
Features, FunctionIndex, LocalFunctionIndex, MemoryIndex, OwnedDataInitializer, SignatureIndex,
TableIndex,
};
use wasmer_compiler::{
CustomSection, FunctionBody, JumpTableOffsets, Relocation, SectionBody, SectionIndex,
@@ -21,7 +22,8 @@ pub struct SerializableCompilation {
// to allow lazy frame_info deserialization, we convert it to its lazy binary
// format upon serialization.
pub function_frame_info: PrimaryMap<LocalFunctionIndex, SerializableFunctionFrameInfo>,
pub trampolines: PrimaryMap<SignatureIndex, FunctionBody>,
pub function_call_trampolines: PrimaryMap<SignatureIndex, FunctionBody>,
pub dynamic_function_trampolines: PrimaryMap<FunctionIndex, FunctionBody>,
pub custom_sections: PrimaryMap<SectionIndex, CustomSection>,
pub custom_section_relocations: PrimaryMap<SectionIndex, Vec<Relocation>>,
}

View File

@@ -24,7 +24,7 @@ pub trait Engine {
fn lookup_signature(&self, sig: VMSharedSignatureIndex) -> Option<FunctionType>;
/// Retrieves a trampoline given a signature
fn trampoline(&self, sig: VMSharedSignatureIndex) -> Option<VMTrampoline>;
fn function_call_trampoline(&self, sig: VMSharedSignatureIndex) -> Option<VMTrampoline>;
/// Validates a WebAssembly module
fn validate(&self, binary: &[u8]) -> Result<(), CompileError>;

View File

@@ -3,11 +3,11 @@
use crate::error::{ImportError, LinkError};
use more_asserts::assert_ge;
use wasm_common::entity::PrimaryMap;
use wasm_common::{ExternType, ImportIndex, MemoryIndex, TableIndex};
use wasm_common::entity::{BoxedSlice, EntityRef, PrimaryMap};
use wasm_common::{ExternType, FunctionIndex, ImportIndex, MemoryIndex, TableIndex};
use wasmer_runtime::{
Export, Imports, SignatureRegistry, VMFunctionImport, VMGlobalImport, VMMemoryImport,
VMTableImport,
Export, Imports, SignatureRegistry, VMFunctionBody, VMFunctionImport, VMFunctionKind,
VMGlobalImport, VMMemoryImport, VMTableImport,
};
use wasmer_runtime::{MemoryPlan, TablePlan};
@@ -91,6 +91,7 @@ pub fn resolve_imports(
module: &Module,
signatures: &SignatureRegistry,
resolver: &dyn Resolver,
finished_dynamic_function_trampolines: &BoxedSlice<FunctionIndex, *const VMFunctionBody>,
memory_plans: &PrimaryMap<MemoryIndex, MemoryPlan>,
_table_plans: &PrimaryMap<TableIndex, TablePlan>,
) -> Result<Imports, LinkError> {
@@ -122,8 +123,21 @@ pub fn resolve_imports(
}
match resolved {
Export::Function(ref f) => {
let address = match f.kind {
VMFunctionKind::Dynamic => {
// If this is a dynamically imported function,
// the address of the function is the address of the
// reverse trampoline.
let index = FunctionIndex::new(function_imports.len());
finished_dynamic_function_trampolines[index]
// TODO: We should check that the f.vmctx actually matches
// the shape of `VMDynamicFunctionImportContext`
}
VMFunctionKind::Static => f.address,
};
function_imports.push(VMFunctionImport {
body: f.address,
body: address,
vmctx: f.vmctx,
});
}
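The index arithmetic above relies on imported functions occupying the front of the `FunctionIndex` space, so the import being resolved always sits at position `function_imports.len()`. A small, hedged illustration using the entity types already imported in this file:
// With two imports already resolved, the next one occupies index 2.
let index = FunctionIndex::new(2);
assert_eq!(index.index(), 2); // EntityRef::index recovers the position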

View File

@@ -2,8 +2,8 @@ use crate::memory::LinearMemory;
use crate::module::{MemoryPlan, TablePlan};
use crate::table::Table;
use crate::vmcontext::{
VMContext, VMFunctionBody, VMGlobalDefinition, VMMemoryDefinition, VMSharedSignatureIndex,
VMTableDefinition,
VMContext, VMFunctionBody, VMFunctionKind, VMGlobalDefinition, VMMemoryDefinition,
VMSharedSignatureIndex, VMTableDefinition,
};
use wasm_common::GlobalType;
@@ -34,6 +34,8 @@ pub struct ExportFunction {
///
/// Note that this indexes within the module associated with `vmctx`.
pub signature: VMSharedSignatureIndex,
/// The function kind (it defines the signature shape that the provided `address` expects)
pub kind: VMFunctionKind,
}
impl From<ExportFunction> for Export {

View File

@@ -8,8 +8,8 @@ use crate::table::Table;
use crate::trap::{catch_traps, init_traps, Trap, TrapCode};
use crate::vmcontext::{
VMBuiltinFunctionsArray, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMSharedSignatureIndex,
VMTableDefinition, VMTableImport,
VMFunctionKind, VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport,
VMSharedSignatureIndex, VMTableDefinition, VMTableImport,
};
use crate::{ExportFunction, ExportGlobal, ExportMemory, ExportTable};
use crate::{Module, TableElements, VMOffsets};
@@ -294,6 +294,11 @@ impl Instance {
};
ExportFunction {
address,
// Any function received is already static at this point, because:
// 1. All locally defined functions in the Wasm have a static signature.
// 2. All the imported functions are already static (because
// they point to the trampolines rather than the dynamic addresses).
kind: VMFunctionKind::Static,
signature,
vmctx,
}

View File

@@ -50,9 +50,10 @@ pub use crate::sig_registry::SignatureRegistry;
pub use crate::table::Table;
pub use crate::trap::*;
pub use crate::vmcontext::{
VMBuiltinFunctionIndex, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMFunctionImport,
VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMSharedSignatureIndex,
VMTableDefinition, VMTableImport, VMTrampoline,
VMBuiltinFunctionIndex, VMCallerCheckedAnyfunc, VMContext, VMDynamicFunctionImportContext,
VMFunctionBody, VMFunctionImport, VMFunctionKind, VMGlobalDefinition, VMGlobalImport,
VMMemoryDefinition, VMMemoryImport, VMSharedSignatureIndex, VMTableDefinition, VMTableImport,
VMTrampoline,
};
pub use crate::vmoffsets::{TargetSharedSignatureIndex, VMOffsets};

View File

@@ -46,6 +46,52 @@ mod test_vmfunction_import {
}
}
/// The `VMDynamicFunctionImportContext` is the context that dynamic
/// functions will receive when called (rather than `vmctx`).
/// A dynamic function is a function for which we don't know the signature
/// until runtime.
///
/// As such, we need to expose the dynamic function's `ctx`,
/// which carries the state required to run the function
/// indicated by `address`.
#[repr(C)]
pub struct VMDynamicFunctionImportContext<T: Sized> {
/// The address of the inner dynamic function.
///
/// Note: The function must be of the form
/// `(*mut T, *mut VMContext, SignatureIndex, *mut i128)`.
pub address: *const VMFunctionBody,
/// The context that the inner dynamic function will receive.
pub ctx: T,
}
#[cfg(test)]
mod test_vmdynamicfunction_import_context {
use super::VMDynamicFunctionImportContext;
use crate::{Module, VMOffsets};
use memoffset::offset_of;
use std::mem::size_of;
#[test]
fn check_vmdynamicfunction_import_context_offsets() {
let module = Module::new();
let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
assert_eq!(
size_of::<VMDynamicFunctionImportContext<usize>>(),
usize::from(offsets.size_of_vmdynamicfunction_import_context())
);
assert_eq!(
offset_of!(VMDynamicFunctionImportContext<usize>, address),
usize::from(offsets.vmdynamicfunction_import_context_address())
);
assert_eq!(
offset_of!(VMDynamicFunctionImportContext<usize>, ctx),
usize::from(offsets.vmdynamicfunction_import_context_ctx())
);
}
}
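A hedged sketch of populating the context by hand; `HostState` is hypothetical, and the null `address` stands in for the generated trampoline code the engine normally installs:
struct HostState {
    hits: usize,
}

// Illustrative only: a real context has `address` pointing at generated code.
let ctx = VMDynamicFunctionImportContext {
    address: std::ptr::null::<VMFunctionBody>(),
    ctx: HostState { hits: 0 },
};
assert_eq!(ctx.ctx.hits, 0);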
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
@@ -64,6 +110,26 @@ mod test_vmfunction_body {
}
}
/// A function kind.
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(C)]
pub enum VMFunctionKind {
/// A function is static when its address matches the signature:
/// (vmctx, vmctx, arg1, arg2...) -> (result1, result2, ...)
///
/// This is the default for functions that are defined:
/// 1. In the Host, natively
/// 2. In the WebAssembly file
Static,
/// A function is dynamic when its address matches the signature:
/// (ctx, &[Type]) -> Vec<Type>
///
/// This is the default for functions that are defined:
/// 1. In the Host, dynamically
Dynamic,
}
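The two kinds correspond to two host-side shapes. A hedged sketch of each; the names and the packed-value ABI are illustrative, not the crate's generated signatures:
// Static: native calling convention, one parameter per Wasm argument.
unsafe extern "C" fn static_add(
    _vmctx: *mut VMContext,
    _caller_vmctx: *mut VMContext,
    a: i32,
    b: i32,
) -> i32 {
    a + b
}

// Dynamic: one entry point for any signature; arguments arrive packed.
fn dynamic_add(values: &[i128]) -> Vec<i128> {
    vec![values[0] + values[1]]
}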
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]

View File

@@ -92,6 +92,30 @@ impl VMOffsets {
}
}
/// Offsets for [`VMDynamicFunctionImportContext`].
///
/// [`VMDynamicFunctionImportContext`]: crate::vmcontext::VMDynamicFunctionImportContext
impl VMOffsets {
/// The offset of the `address` field.
#[allow(clippy::erasing_op)]
pub fn vmdynamicfunction_import_context_address(&self) -> u8 {
0 * self.pointer_size
}
/// The offset of the `ctx` field.
#[allow(clippy::identity_op)]
pub fn vmdynamicfunction_import_context_ctx(&self) -> u8 {
1 * self.pointer_size
}
/// Return the size of [`VMDynamicFunctionImportContext`].
///
/// [`VMDynamicFunctionImportContext`]: crate::vmcontext::VMDynamicFunctionImportContext
pub fn size_of_vmdynamicfunction_import_context(&self) -> u8 {
2 * self.pointer_size
}
}
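With the usual 8-byte pointers this puts `address` at byte 0 and `ctx` at byte 8; the size helper assumes a pointer-sized `ctx`, for 16 bytes total. A quick check mirroring the vmcontext tests:
let module = Module::new();
let offsets = VMOffsets::new(8, &module);
assert_eq!(offsets.vmdynamicfunction_import_context_address(), 0);
assert_eq!(offsets.vmdynamicfunction_import_context_ctx(), 8);
assert_eq!(offsets.size_of_vmdynamicfunction_import_context(), 16);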
/// Offsets for `*const VMFunctionBody`.
impl VMOffsets {
/// The size of the `current_elements` field.

View File

@@ -55,6 +55,7 @@ entity_impl!(MemoryIndex);
/// Index type of a signature (imported or local) inside the WebAssembly module.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
#[repr(transparent)]
pub struct SignatureIndex(u32);
entity_impl!(SignatureIndex);

View File

@@ -261,39 +261,26 @@ pub struct FunctionBody(*mut u8);
/// Represents a function that can be used by WebAssembly.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct Func<Args = (), Rets = (), Env = ()> {
pub struct Func<Args = (), Rets = ()> {
address: *const FunctionBody,
env: Option<*mut Env>,
_phantom: PhantomData<(Args, Rets)>,
}
unsafe impl<Args, Rets> Send for Func<Args, Rets> {}
impl<Args, Rets, Env> Func<Args, Rets, Env>
impl<Args, Rets> Func<Args, Rets>
where
Args: WasmTypeList,
Rets: WasmTypeList,
Env: Sized,
{
/// Creates a new `Func`.
pub fn new<F>(func: F) -> Self
pub fn new<F, T, E>(func: F) -> Self
where
F: HostFunction<Args, Rets, WithoutEnv, Env>,
F: HostFunction<Args, Rets, T, E>,
T: HostFunctionKind,
E: Sized,
{
Self {
env: None,
address: func.to_raw(),
_phantom: PhantomData,
}
}
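With the environment type parameter gone, construction reads the same with or without host state; a hedged usage sketch, assuming `HostFunction` is implemented for plain fns over Wasm-compatible types:
fn add(a: i32, b: i32) -> i32 {
    a + b
}

// Args and Rets are inferred from the fn item; T and E are fixed by
// whichever HostFunction impl matches `add`.
let f: Func<(i32, i32), i32> = Func::new(add);
assert!(!f.address().is_null());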
/// Creates a new `Func` with a given `env`.
pub fn new_env<F>(env: &mut Env, func: F) -> Self
where
F: HostFunction<Args, Rets, WithEnv, Env>,
{
Self {
env: Some(env),
address: func.to_raw(),
_phantom: PhantomData,
}
@@ -304,11 +291,6 @@ where
FunctionType::new(Args::wasm_types(), Rets::wasm_types())
}
/// Get the environment of the Func, if any
pub fn env(&self) -> Option<*mut Env> {
self.env
}
/// Get the address of the Func
pub fn address(&self) -> *const FunctionBody {
self.address