Added WasmPtr and MemoryView to the wasmer API
@@ -1,4 +1,5 @@
use crate::exports::{ExportError, Exportable};
use crate::memory_view::MemoryView;
use crate::store::{Store, StoreObject};
use crate::types::{Val, ValAnyFunc};
use crate::Mutability;
@@ -6,7 +7,7 @@ use crate::RuntimeError;
use crate::{ExternType, FuncType, GlobalType, MemoryType, TableType, ValType};
use std::cmp::max;
use std::slice;
use wasm_common::{Bytes, HostFunction, Pages, WasmTypeList, WithEnv, WithoutEnv};
use wasm_common::{Bytes, HostFunction, Pages, ValueType, WasmTypeList, WithEnv, WithoutEnv};
use wasmer_runtime::{
    wasmer_call_trampoline, Export, ExportFunction, ExportGlobal, ExportMemory, ExportTable,
    Table as RuntimeTable, VMCallerCheckedAnyfunc, VMContext, VMFunctionBody, VMGlobalDefinition,
@@ -393,6 +394,44 @@ impl Memory {
    pub fn grow(&self, delta: Pages) -> Result<Pages, RuntimeError> {
        Ok(unsafe { (&*self.exported.from) }.grow(delta).unwrap())
    }

    /// Return a "view" of the currently accessible memory. By
    /// default, the view is unsynchronized, using regular memory
    /// accesses. You can force a memory view to use atomic accesses
    /// by calling the [`MemoryView::atomically`] method.
    ///
    /// # Notes:
    ///
    /// This method is safe (as in, it won't cause the host to crash or have UB),
    /// but it doesn't obey Rust's rules involving data races, especially concurrent ones.
    /// Therefore, if this memory is shared between multiple threads, a single memory
    /// location can be mutated concurrently without synchronization.
    ///
    /// # Usage:
    ///
    /// ```
    /// # use wasmer::{Memory, MemoryView};
    /// # use std::{cell::Cell, sync::atomic::Ordering};
    /// # fn view_memory(memory: Memory) {
    /// // Without synchronization.
    /// let view: MemoryView<u8> = memory.view();
    /// for byte in view[0x1000 .. 0x1010].iter().map(Cell::get) {
    ///     println!("byte: {}", byte);
    /// }
    ///
    /// // With synchronization.
    /// let atomic_view = view.atomically();
    /// for byte in atomic_view[0x1000 .. 0x1010].iter().map(|atom| atom.load(Ordering::SeqCst)) {
    ///     println!("byte: {}", byte);
    /// }
    /// # }
    /// ```
    pub fn view<T: ValueType>(&self) -> MemoryView<T> {
        let base = self.data_ptr();

        let length = self.size().bytes().0 / std::mem::size_of::<T>();

        unsafe { MemoryView::new(base as _, length as u32) }
    }

    pub(crate) fn from_export(store: &Store, wasmer_export: ExportMemory) -> Memory {
        Memory {
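Below is a hedged usage sketch (not part of this commit) of the new `grow` and `view` methods, assuming `Pages` comes from `wasm_common` as in the imports above and is the tuple struct used elsewhere in the codebase:

use wasm_common::Pages;
use wasmer::{Memory, MemoryView};

fn grow_and_zero(memory: &Memory) {
    // Grow by one Wasm page; `grow` returns the previous size in pages.
    let _previous = memory.grow(Pages(1)).expect("memory grows");

    // Unsynchronized byte view over the whole memory; zero the first 16 cells.
    let view: MemoryView<u8> = memory.view();
    for cell in view[0..16].iter() {
        cell.set(0);
    }
}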
@@ -30,6 +30,7 @@ impl Instance {
    /// The [`ImportObject`] is the easiest way to provide imports to the instance.
    ///
    /// ```
    /// # use wasmer::{imports, Store, Module, Global, Instance};
    /// let store = Store::default();
    /// let module = Module::new(store, "(module)");
    /// let imports = imports!{
@@ -5,7 +5,9 @@ mod exports;
mod externals;
mod import_object;
mod instance;
mod memory_view;
mod module;
mod ptr;
mod store;
mod types;
@@ -13,7 +15,9 @@ pub use crate::exports::{ExportError, Exportable, Exports};
pub use crate::externals::{Extern, Func, Global, Memory, Table};
pub use crate::import_object::{ImportObject, ImportObjectIterator, LikeNamespace};
pub use crate::instance::Instance;
pub use crate::memory_view::MemoryView;
pub use crate::module::Module;
pub use crate::ptr::{Array, Item, WasmPtr};
pub use crate::store::{Engine, Store, StoreObject};
pub use crate::types::{
    AnyRef, ExportType, ExternType, FuncType, GlobalType, HostInfo, HostRef, ImportType,
96
lib/api/src/memory_view.rs
Normal file
@@ -0,0 +1,96 @@
use wasm_common::ValueType;

use std::sync::atomic::{
    AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicU16, AtomicU32, AtomicU64, AtomicU8,
};
use std::{cell::Cell, marker::PhantomData, ops::Deref, slice};

pub trait Atomic {
    type Output;
}
impl Atomic for i8 {
    type Output = AtomicI8;
}
impl Atomic for i16 {
    type Output = AtomicI16;
}
impl Atomic for i32 {
    type Output = AtomicI32;
}
impl Atomic for i64 {
    type Output = AtomicI64;
}
impl Atomic for u8 {
    type Output = AtomicU8;
}
impl Atomic for u16 {
    type Output = AtomicU16;
}
impl Atomic for u32 {
    type Output = AtomicU32;
}
impl Atomic for u64 {
    type Output = AtomicU64;
}
impl Atomic for f32 {
    type Output = AtomicU32;
}
impl Atomic for f64 {
    type Output = AtomicU64;
}
/// A marker trait for whether a memory view's accesses are atomic.
pub trait Atomicity {}
/// Atomically.
pub struct Atomically;
impl Atomicity for Atomically {}
/// Non-atomically.
pub struct NonAtomically;
impl Atomicity for NonAtomically {}

/// A view into a memory.
pub struct MemoryView<'a, T: 'a, A = NonAtomically> {
    ptr: *mut T,
    length: usize,
    _phantom: PhantomData<(&'a [Cell<T>], A)>,
}

impl<'a, T> MemoryView<'a, T, NonAtomically>
where
    T: ValueType,
{
    pub(super) unsafe fn new(ptr: *mut T, length: u32) -> Self {
        Self {
            ptr,
            length: length as usize,
            _phantom: PhantomData,
        }
    }
}

impl<'a, T: Atomic> MemoryView<'a, T> {
    /// Get atomic access to a memory view.
    pub fn atomically(&self) -> MemoryView<'a, T::Output, Atomically> {
        MemoryView {
            ptr: self.ptr as *mut T::Output,
            length: self.length,
            _phantom: PhantomData,
        }
    }
}

impl<'a, T> Deref for MemoryView<'a, T, NonAtomically> {
    type Target = [Cell<T>];
    fn deref(&self) -> &[Cell<T>] {
        let mut_slice: &mut [T] = unsafe { slice::from_raw_parts_mut(self.ptr, self.length) };
        let cell_slice: &Cell<[T]> = Cell::from_mut(mut_slice);
        cell_slice.as_slice_of_cells()
    }
}

impl<'a, T> Deref for MemoryView<'a, T, Atomically> {
    type Target = [T];
    fn deref(&self) -> &[T] {
        unsafe { slice::from_raw_parts(self.ptr as *const T, self.length) }
    }
}
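As a rough illustration (not part of this commit) of how the non-atomic and atomic views relate, assuming the `Memory::view` method added above:

use std::sync::atomic::Ordering;
use wasmer::{Memory, MemoryView};

fn read_first_word(memory: &Memory) -> u32 {
    let view: MemoryView<u32> = memory.view();
    // Plain, unsynchronized read through a `Cell<u32>`.
    let _plain = view[0].get();
    // The same region reinterpreted as `AtomicU32` cells via `atomically()`.
    let atomic_view = view.atomically();
    atomic_view[0].load(Ordering::SeqCst)
}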
351
lib/api/src/ptr.rs
Normal file
@@ -0,0 +1,351 @@
//! Types for a reusable pointer abstraction for accessing Wasm linear memory.
//!
//! This abstraction is safe: it ensures the memory is in bounds and that the pointer
//! is aligned (avoiding undefined behavior).
//!
//! Therefore, you should use this abstraction whenever possible to avoid memory
//! related bugs when implementing an ABI.

use crate::externals::Memory;
use wasm_common::{ValueType, WasmExternType};

use std::{cell::Cell, fmt, marker::PhantomData, mem};

/// The `Array` marker type. This type can be used like `WasmPtr<T, Array>`
/// to get access to the methods that operate on arrays of values.
pub struct Array;
/// The `Item` marker type. This is the default and does not usually need to be
/// specified.
pub struct Item;
/// A zero-cost type that represents a pointer to something in Wasm linear
/// memory.
///
/// This type can be used directly in the host function arguments:
/// ```
/// # use wasmer::Memory;
/// # use wasmer::WasmPtr;
/// pub fn host_import(memory: Memory, ptr: WasmPtr<u32>) {
///     let derefed_ptr = ptr.deref(&memory).expect("pointer in bounds");
///     let inner_val: u32 = derefed_ptr.get();
///     println!("Got {} from Wasm memory address 0x{:X}", inner_val, ptr.offset());
///     // update the value being pointed to
///     derefed_ptr.set(inner_val + 1);
/// }
/// ```
#[repr(transparent)]
pub struct WasmPtr<T: Copy, Ty = Item> {
    offset: u32,
    _phantom: PhantomData<(T, Ty)>,
}

/// Methods relevant to all types of `WasmPtr`.
impl<T: Copy, Ty> WasmPtr<T, Ty> {
    /// Create a new `WasmPtr` at the given offset.
    #[inline]
    pub fn new(offset: u32) -> Self {
        Self {
            offset,
            _phantom: PhantomData,
        }
    }

    /// Get the offset into Wasm linear memory for this `WasmPtr`.
    #[inline]
    pub fn offset(self) -> u32 {
        self.offset
    }
}
#[inline(always)]
fn align_pointer(ptr: usize, align: usize) -> usize {
    // clears bits below alignment amount (assumes power of 2) to align pointer
    debug_assert!(align.count_ones() == 1);
    ptr & !(align - 1)
}
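For illustration (not in the diff), aligning down to a 4-byte boundary clears the two low bits; a quick check as it would read inside this module:

assert_eq!(align_pointer(0x1003, 4), 0x1000);
assert_eq!(align_pointer(0x1000, 4), 0x1000);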
/// Methods for `WasmPtr`s to data that can be dereferenced, namely to types
/// that implement [`ValueType`], meaning that they're valid for all possible
/// bit patterns.
impl<T: Copy + ValueType> WasmPtr<T, Item> {
    /// Dereference the `WasmPtr` getting access to a `&Cell<T>` allowing for
    /// reading and mutating of the inner value.
    ///
    /// This method is unsound if used with unsynchronized shared memory.
    /// If you're unsure what that means, it likely does not apply to you.
    /// This invariant will be enforced in the future.
    #[inline]
    pub fn deref<'a>(self, memory: &'a Memory) -> Option<&'a Cell<T>> {
        if (self.offset as usize) + mem::size_of::<T>() > memory.size().bytes().0
            || mem::size_of::<T>() == 0
        {
            return None;
        }
        unsafe {
            let cell_ptr = align_pointer(
                memory.view::<u8>().as_ptr().add(self.offset as usize) as usize,
                mem::align_of::<T>(),
            ) as *const Cell<T>;
            Some(&*cell_ptr)
        }
    }

    /// Mutably dereference this `WasmPtr` getting a `&mut Cell<T>` allowing for
    /// direct access to a `&mut T`.
    ///
    /// # Safety
    /// - This method does not do any aliasing checks: it's possible to create
    ///   `&mut T` that point to the same memory. You should ensure that you have
    ///   exclusive access to Wasm linear memory before calling this method.
    #[inline]
    pub unsafe fn deref_mut<'a>(self, memory: &'a Memory) -> Option<&'a mut Cell<T>> {
        if (self.offset as usize) + mem::size_of::<T>() > memory.size().bytes().0
            || mem::size_of::<T>() == 0
        {
            return None;
        }
        let cell_ptr = align_pointer(
            memory.view::<u8>().as_ptr().add(self.offset as usize) as usize,
            mem::align_of::<T>(),
        ) as *mut Cell<T>;
        Some(&mut *cell_ptr)
    }
}
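A hedged sketch (not part of this commit) of mutating a guest value through `deref_mut`, assuming the host has exclusive access to the memory at this point:

use wasmer::{Memory, WasmPtr};

fn bump_counter(memory: &Memory, counter: WasmPtr<u32>) {
    // Safety: assumes no other host or guest code is using this memory concurrently.
    if let Some(cell) = unsafe { counter.deref_mut(memory) } {
        cell.set(cell.get() + 1);
    }
}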
/// Methods for `WasmPtr`s to arrays of data that can be dereferenced, namely to
/// types that implement [`ValueType`], meaning that they're valid for all
/// possible bit patterns.
impl<T: Copy + ValueType> WasmPtr<T, Array> {
    /// Dereference the `WasmPtr` getting access to a `&[Cell<T>]` allowing for
    /// reading and mutating of the inner values.
    ///
    /// This method is unsound if used with unsynchronized shared memory.
    /// If you're unsure what that means, it likely does not apply to you.
    /// This invariant will be enforced in the future.
    #[inline]
    pub fn deref(self, memory: &Memory, index: u32, length: u32) -> Option<&[Cell<T>]> {
        // gets the size of the item in the array with padding added such that
        // any index will always result in an aligned memory access
        let item_size = mem::size_of::<T>() + (mem::size_of::<T>() % mem::align_of::<T>());
        let slice_full_len = index as usize + length as usize;
        let memory_size = memory.size().bytes().0;

        if (self.offset as usize) + (item_size * slice_full_len) > memory_size
            || self.offset as usize >= memory_size
            || mem::size_of::<T>() == 0
        {
            return None;
        }

        unsafe {
            let cell_ptr = align_pointer(
                memory.view::<u8>().as_ptr().add(self.offset as usize) as usize,
                mem::align_of::<T>(),
            ) as *const Cell<T>;
            let cell_ptrs = &std::slice::from_raw_parts(cell_ptr, slice_full_len)
                [index as usize..slice_full_len];
            Some(cell_ptrs)
        }
    }
    /// Mutably dereference this `WasmPtr` getting a `&mut [Cell<T>]` allowing for
    /// direct access to a `&mut [T]`.
    ///
    /// # Safety
    /// - This method does not do any aliasing checks: it's possible to create
    ///   `&mut T` that point to the same memory. You should ensure that you have
    ///   exclusive access to Wasm linear memory before calling this method.
    #[inline]
    pub unsafe fn deref_mut(
        self,
        memory: &Memory,
        index: u32,
        length: u32,
    ) -> Option<&mut [Cell<T>]> {
        // gets the size of the item in the array with padding added such that
        // any index will always result in an aligned memory access
        let item_size = mem::size_of::<T>() + (mem::size_of::<T>() % mem::align_of::<T>());
        let slice_full_len = index as usize + length as usize;
        let memory_size = memory.size().bytes().0;

        if (self.offset as usize) + (item_size * slice_full_len) > memory.size().bytes().0
            || self.offset as usize >= memory_size
            || mem::size_of::<T>() == 0
        {
            return None;
        }

        let cell_ptr = align_pointer(
            memory.view::<u8>().as_ptr().add(self.offset as usize) as usize,
            mem::align_of::<T>(),
        ) as *mut Cell<T>;
        let cell_ptrs = &mut std::slice::from_raw_parts_mut(cell_ptr, slice_full_len)
            [index as usize..slice_full_len];
        Some(cell_ptrs)
    }
    /// Get a UTF-8 string from the `WasmPtr` with the given length.
    ///
    /// Note that this method returns a reference to Wasm linear memory. The
    /// underlying data can be mutated if the Wasm is allowed to execute or
    /// an aliasing `WasmPtr` is used to mutate memory.
    pub fn get_utf8_string(self, memory: &Memory, str_len: u32) -> Option<&str> {
        let memory_size = memory.size().bytes().0;

        if self.offset as usize + str_len as usize > memory.size().bytes().0
            || self.offset as usize >= memory_size
        {
            return None;
        }
        let ptr = unsafe { memory.view::<u8>().as_ptr().add(self.offset as usize) as *const u8 };
        let slice: &[u8] = unsafe { std::slice::from_raw_parts(ptr, str_len as usize) };
        std::str::from_utf8(slice).ok()
    }

    /// Get a UTF-8 string from the `WasmPtr`, where the string is nul-terminated.
    ///
    /// Note that this does not account for UTF-8 strings that _contain_ nul themselves,
    /// [`get_utf8_string`] has to be used for those.
    ///
    /// Also note that this method returns a reference to Wasm linear memory. The
    /// underlying data can be mutated if the Wasm is allowed to execute or
    /// an aliasing `WasmPtr` is used to mutate memory.
    pub fn get_utf8_string_with_nul(self, memory: &Memory) -> Option<&str> {
        memory.view::<u8>()[(self.offset as usize)..]
            .iter()
            .map(|cell| cell.get())
            .position(|byte| byte == 0)
            .and_then(|length| self.get_utf8_string(memory, length as u32))
    }
}
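A hedged sketch (not part of this commit) of a host import that reads a (pointer, length) pair out of guest memory, combining the array marker with `get_utf8_string`:

use wasmer::{Array, Memory, WasmPtr};

fn log_str(memory: &Memory, ptr: WasmPtr<u8, Array>, len: u32) {
    // `get_utf8_string` bounds-checks the range and validates the bytes as UTF-8.
    if let Some(s) = ptr.get_utf8_string(memory, len) {
        println!("guest says: {}", s);
    }
}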
unsafe impl<T: Copy, Ty> WasmExternType for WasmPtr<T, Ty> {
    type Native = i32;

    fn to_native(self) -> Self::Native {
        self.offset as i32
    }
    fn from_native(n: Self::Native) -> Self {
        Self {
            offset: n as u32,
            _phantom: PhantomData,
        }
    }
}

unsafe impl<T: Copy, Ty> ValueType for WasmPtr<T, Ty> {}

impl<T: Copy, Ty> Clone for WasmPtr<T, Ty> {
    fn clone(&self) -> Self {
        Self {
            offset: self.offset,
            _phantom: PhantomData,
        }
    }
}

impl<T: Copy, Ty> Copy for WasmPtr<T, Ty> {}

impl<T: Copy, Ty> PartialEq for WasmPtr<T, Ty> {
    fn eq(&self, other: &Self) -> bool {
        self.offset == other.offset
    }
}

impl<T: Copy, Ty> Eq for WasmPtr<T, Ty> {}

impl<T: Copy, Ty> fmt::Debug for WasmPtr<T, Ty> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "WasmPtr({:#x})", self.offset)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::{Memory, MemoryType, Store};

    /// Ensure that memory accesses work on the edges of memory and that out of
    /// bounds errors are caught with both `deref` and `deref_mut`.
    #[test]
    fn wasm_ptr_memory_bounds_checks_hold() {
        // create a memory
        let store = Store::default();
        let memory_descriptor = MemoryType::new(1, Some(1), false);
        let memory = Memory::new(&store, memory_descriptor);

        // test that basic access works and that len = 0 works, but oob does not
        let start_wasm_ptr: WasmPtr<u8> = WasmPtr::new(0);
        let start_wasm_ptr_array: WasmPtr<u8, Array> = WasmPtr::new(0);

        assert!(start_wasm_ptr.deref(&memory).is_some());
        assert!(unsafe { start_wasm_ptr.deref_mut(&memory).is_some() });
        assert!(start_wasm_ptr_array.deref(&memory, 0, 0).is_some());
        assert!(start_wasm_ptr_array.get_utf8_string(&memory, 0).is_some());
        assert!(unsafe { start_wasm_ptr_array.deref_mut(&memory, 0, 0).is_some() });
        assert!(start_wasm_ptr_array.deref(&memory, 0, 1).is_some());
        assert!(unsafe { start_wasm_ptr_array.deref_mut(&memory, 0, 1).is_some() });

        // test that accessing the last valid memory address works correctly and OOB is caught
        let last_valid_address_for_u8 = (memory.size().bytes().0 - 1) as u32;
        let end_wasm_ptr: WasmPtr<u8> = WasmPtr::new(last_valid_address_for_u8);
        assert!(end_wasm_ptr.deref(&memory).is_some());
        assert!(unsafe { end_wasm_ptr.deref_mut(&memory).is_some() });

        let end_wasm_ptr_array: WasmPtr<u8, Array> = WasmPtr::new(last_valid_address_for_u8);

        assert!(end_wasm_ptr_array.deref(&memory, 0, 1).is_some());
        assert!(unsafe { end_wasm_ptr_array.deref_mut(&memory, 0, 1).is_some() });
        let invalid_idx_len_combos: [(u32, u32); 3] =
            [(last_valid_address_for_u8 + 1, 0), (0, 2), (1, 1)];
        for &(idx, len) in invalid_idx_len_combos.iter() {
            assert!(end_wasm_ptr_array.deref(&memory, idx, len).is_none());
            assert!(unsafe { end_wasm_ptr_array.deref_mut(&memory, idx, len).is_none() });
        }
        assert!(end_wasm_ptr_array.get_utf8_string(&memory, 2).is_none());

        // test that accessing the last valid memory address for a u32 is valid
        // (same as above test but with more edge cases to assert on)
        let last_valid_address_for_u32 = (memory.size().bytes().0 - 4) as u32;
        let end_wasm_ptr: WasmPtr<u32> = WasmPtr::new(last_valid_address_for_u32);
        assert!(end_wasm_ptr.deref(&memory).is_some());
        assert!(unsafe { end_wasm_ptr.deref_mut(&memory).is_some() });
        assert!(end_wasm_ptr.deref(&memory).is_some());
        assert!(unsafe { end_wasm_ptr.deref_mut(&memory).is_some() });

        let end_wasm_ptr_oob_array: [WasmPtr<u32>; 4] = [
            WasmPtr::new(last_valid_address_for_u32 + 1),
            WasmPtr::new(last_valid_address_for_u32 + 2),
            WasmPtr::new(last_valid_address_for_u32 + 3),
            WasmPtr::new(last_valid_address_for_u32 + 4),
        ];
        for oob_end_ptr in end_wasm_ptr_oob_array.iter() {
            assert!(oob_end_ptr.deref(&memory).is_none());
            assert!(unsafe { oob_end_ptr.deref_mut(&memory).is_none() });
        }
        let end_wasm_ptr_array: WasmPtr<u32, Array> = WasmPtr::new(last_valid_address_for_u32);
        assert!(end_wasm_ptr_array.deref(&memory, 0, 1).is_some());
        assert!(unsafe { end_wasm_ptr_array.deref_mut(&memory, 0, 1).is_some() });

        let invalid_idx_len_combos: [(u32, u32); 3] =
            [(last_valid_address_for_u32 + 1, 0), (0, 2), (1, 1)];
        for &(idx, len) in invalid_idx_len_combos.iter() {
            assert!(end_wasm_ptr_array.deref(&memory, idx, len).is_none());
            assert!(unsafe { end_wasm_ptr_array.deref_mut(&memory, idx, len).is_none() });
        }

        let end_wasm_ptr_array_oob_array: [WasmPtr<u32, Array>; 4] = [
            WasmPtr::new(last_valid_address_for_u32 + 1),
            WasmPtr::new(last_valid_address_for_u32 + 2),
            WasmPtr::new(last_valid_address_for_u32 + 3),
            WasmPtr::new(last_valid_address_for_u32 + 4),
        ];

        for oob_end_array_ptr in end_wasm_ptr_array_oob_array.iter() {
            assert!(oob_end_array_ptr.deref(&memory, 0, 1).is_none());
            assert!(unsafe { oob_end_array_ptr.deref_mut(&memory, 0, 1).is_none() });
            assert!(oob_end_array_ptr.deref(&memory, 1, 0).is_none());
            assert!(unsafe { oob_end_array_ptr.deref_mut(&memory, 1, 0).is_none() });
        }
    }
}
@@ -1,8 +1,5 @@
mod address_map;
mod frame_layout;

pub use self::address_map::{
    ModuleMemoryOffset,
    ModuleVmctxInfo, ValueLabelsRanges,
};
pub use self::address_map::{ModuleMemoryOffset, ModuleVmctxInfo, ValueLabelsRanges};
pub use self::frame_layout::{FrameLayout, FrameLayoutChange, FrameLayouts};
@@ -57,10 +57,7 @@ mod unwind;
pub use crate::compiler::{transform_jump_table, CraneliftCompiler};
pub use crate::config::CraneliftConfig;
pub use crate::debug::{FrameLayout, FrameLayoutChange, FrameLayouts};
pub use crate::debug::{
    ModuleMemoryOffset,
    ModuleVmctxInfo, ValueLabelsRanges,
};
pub use crate::debug::{ModuleMemoryOffset, ModuleVmctxInfo, ValueLabelsRanges};
pub use crate::trampoline::make_wasm_trampoline;
pub use crate::unwind::compiled_function_unwind_info;
@@ -43,7 +43,10 @@ pub use crate::indexes::{
    DataIndex, ElemIndex, ExportIndex, FuncIndex, GlobalIndex, ImportIndex, LocalFuncIndex,
    LocalGlobalIndex, LocalMemoryIndex, LocalTableIndex, MemoryIndex, SignatureIndex, TableIndex,
};
pub use crate::native::{Func, HostFunction, NativeWasmType, WasmTypeList, WithEnv, WithoutEnv};
pub use crate::native::{
    Func, HostFunction, NativeWasmType, ValueType, WasmExternType, WasmTypeList, WithEnv,
    WithoutEnv,
};
pub use crate::r#ref::{AnyRef, HostInfo, HostRef};
pub use crate::units::{Bytes, Pages};
pub use crate::values::Value;
@@ -70,6 +70,96 @@ mod test_native_type {
    }
}

/// A trait to represent a wasm extern type.
pub unsafe trait WasmExternType: Copy
where
    Self: Sized,
{
    /// Native wasm type for this `WasmExternType`.
    type Native: NativeWasmType;

    /// Convert from given `Native` type to self.
    fn from_native(native: Self::Native) -> Self;

    /// Convert self to `Native` type.
    fn to_native(self) -> Self::Native;
}
macro_rules! wasm_extern_type {
    ($type:ty => $native_type:ty) => {
        unsafe impl WasmExternType for $type {
            type Native = $native_type;

            fn from_native(native: Self::Native) -> Self {
                native as _
            }

            fn to_native(self) -> Self::Native {
                self as _
            }
        }
    };
}

wasm_extern_type!(i8 => i32);
wasm_extern_type!(u8 => i32);
wasm_extern_type!(i16 => i32);
wasm_extern_type!(u16 => i32);
wasm_extern_type!(i32 => i32);
wasm_extern_type!(u32 => i32);
wasm_extern_type!(i64 => i64);
wasm_extern_type!(u64 => i64);
wasm_extern_type!(f32 => f32);
wasm_extern_type!(f64 => f64);
// wasm_extern_type!(u128 => i128);
// wasm_extern_type!(i128 => i128);
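A rough illustration (not part of this commit) of what the generated impls do: host-side integers cross the Wasm boundary as their wasm-level native type, e.g. a `u8` travels as an `i32` and round-trips losslessly:

// Assumes `WasmExternType` is in scope, as it is inside this module.
fn round_trip_u8() {
    let native: i32 = 255u8.to_native();
    let back = u8::from_native(native);
    assert_eq!(back, 255u8);
}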
// pub trait IntegerAtomic
// where
//     Self: Sized
// {
//     type Primitive;

//     fn add(&self, other: Self::Primitive) -> Self::Primitive;
//     fn sub(&self, other: Self::Primitive) -> Self::Primitive;
//     fn and(&self, other: Self::Primitive) -> Self::Primitive;
//     fn or(&self, other: Self::Primitive) -> Self::Primitive;
//     fn xor(&self, other: Self::Primitive) -> Self::Primitive;
//     fn load(&self) -> Self::Primitive;
//     fn store(&self, other: Self::Primitive) -> Self::Primitive;
//     fn compare_exchange(&self, expected: Self::Primitive, new: Self::Primitive) -> Self::Primitive;
//     fn swap(&self, other: Self::Primitive) -> Self::Primitive;
// }
/// Trait for a Value type. A Value type is a type that is always valid and may
/// be safely copied.
///
/// That is, for all possible bit patterns a valid Value type can be constructed
/// from those bits.
///
/// Concretely a `u32` is a Value type because every combination of 32 bits is
/// a valid `u32`. However a `bool` is _not_ a Value type because any bit patterns
/// other than `0` and `1` are invalid in Rust and may cause undefined behavior if
/// a `bool` is constructed from those bytes.
pub unsafe trait ValueType: Copy
where
    Self: Sized,
{
}

macro_rules! convert_value_impl {
    ($t:ty) => {
        unsafe impl ValueType for $t {}
    };
    ( $($t:ty),* ) => {
        $(
            convert_value_impl!($t);
        )*
    };
}

convert_value_impl!(u8, i8, u16, i16, u32, i32, u64, i64, f32, f64);
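A hedged sketch (not part of this commit) of opting a custom plain-old-data struct into `ValueType`, which is what makes it usable behind a `WasmPtr`; `Vec2` is a hypothetical example type:

// Every bit pattern of two `u32`s is a valid `Vec2`, so the unsafe impl
// upholds the trait's contract.
#[derive(Copy, Clone)]
#[repr(C)]
struct Vec2 {
    x: u32,
    y: u32,
}

unsafe impl ValueType for Vec2 {}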
/// Represents a list of WebAssembly values.
pub trait WasmTypeList {
    /// CStruct type.