Implement the new Context API in the core `wasmer` API

Co-authored-by: ptitSeb <sebastien.chev@gmail.com>
This commit is contained in:
Amanieu d'Antras
2022-05-20 12:38:28 +01:00
committed by Manos Pitsidianakis
parent f16bd35d3c
commit 738a66f719
63 changed files with 2716 additions and 4821 deletions

View File

@@ -1,16 +1,17 @@
use crate::sys::context::{AsContextMut, AsContextRef};
use crate::sys::exports::{ExportError, Exportable};
use crate::sys::externals::Extern;
use crate::sys::store::Store;
use crate::sys::MemoryType;
use crate::MemoryAccessError;
use crate::{ContextRef, MemoryAccessError};
use std::convert::TryInto;
use std::marker::PhantomData;
use std::mem;
use std::mem::MaybeUninit;
use std::slice;
use std::sync::Arc;
use wasmer_compiler::Export;
use wasmer_types::Pages;
use wasmer_vm::{MemoryError, VMMemory};
use wasmer_vm::{
ContextHandle, ContextObjects, InternalContextHandle, MemoryError, VMExtern, VMMemory,
};
/// A WebAssembly `memory` instance.
///
@@ -26,10 +27,9 @@ use wasmer_vm::{MemoryError, VMMemory};
/// mutable from both host and WebAssembly.
///
/// Spec: <https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances>
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct Memory {
store: Store,
vm_memory: VMMemory,
handle: ContextHandle<VMMemory>,
}
impl Memory {
@@ -46,19 +46,14 @@ impl Memory {
/// #
/// let m = Memory::new(&store, MemoryType::new(1, None, false)).unwrap();
/// ```
pub fn new(store: &Store, ty: MemoryType) -> Result<Self, MemoryError> {
let tunables = store.tunables();
pub fn new(mut ctx: impl AsContextMut, ty: MemoryType) -> Result<Self, MemoryError> {
let mut ctx = ctx.as_context_mut();
let tunables = ctx.store().tunables();
let style = tunables.memory_style(&ty);
let memory = tunables.create_host_memory(&ty, &style)?;
Ok(Self {
store: store.clone(),
vm_memory: VMMemory {
from: memory,
// We are creating it from the host, and therefore there is no
// associated instance with this memory
instance_ref: None,
},
handle: ContextHandle::new(ctx.objects_mut(), memory),
})
}
@@ -75,24 +70,8 @@ impl Memory {
///
/// assert_eq!(m.ty(), mt);
/// ```
pub fn ty(&self) -> MemoryType {
self.vm_memory.from.ty()
}
/// Returns the [`Store`] where the `Memory` belongs.
///
/// # Example
///
/// ```
/// # use wasmer::{Memory, MemoryType, Pages, Store, Type, Value};
/// # let store = Store::default();
/// #
/// let m = Memory::new(&store, MemoryType::new(1, None, false)).unwrap();
///
/// assert_eq!(m.store(), &store);
/// ```
pub fn store(&self) -> &Store {
&self.store
pub fn ty(&self, ctx: impl AsContextRef) -> MemoryType {
self.handle.get(ctx.as_context_ref().objects()).ty()
}
/// Returns the pointer to the raw bytes of the `Memory`.
@@ -100,17 +79,13 @@ impl Memory {
// This used by wasmer-emscripten and wasmer-c-api, but should be treated
// as deprecated and not used in future code.
#[doc(hidden)]
pub fn data_ptr(&self) -> *mut u8 {
let definition = self.vm_memory.from.vmmemory();
let def = unsafe { definition.as_ref() };
def.base
pub fn data_ptr(&self, ctx: impl AsContextRef) -> *mut u8 {
self.buffer(ctx.as_context_ref()).base
}
/// Returns the size (in bytes) of the `Memory`.
pub fn data_size(&self) -> u64 {
let definition = self.vm_memory.from.vmmemory();
let def = unsafe { definition.as_ref() };
def.current_length.try_into().unwrap()
pub fn data_size(&self, ctx: impl AsContextRef) -> u64 {
self.buffer(ctx.as_context_ref()).len.try_into().unwrap()
}
/// Returns the size (in [`Pages`]) of the `Memory`.
@@ -125,8 +100,8 @@ impl Memory {
///
/// assert_eq!(m.size(), Pages(1));
/// ```
pub fn size(&self) -> Pages {
self.vm_memory.from.size()
pub fn size(&self, ctx: impl AsContextRef) -> Pages {
self.handle.get(ctx.as_context_ref().objects()).size()
}
/// Grow memory by the specified amount of WebAssembly [`Pages`] and return
@@ -159,46 +134,17 @@ impl Memory {
/// // This results in an error: `MemoryError::CouldNotGrow`.
/// let s = m.grow(1).unwrap();
/// ```
pub fn grow<IntoPages>(&self, delta: IntoPages) -> Result<Pages, MemoryError>
pub fn grow<IntoPages>(
&self,
mut ctx: impl AsContextMut,
delta: IntoPages,
) -> Result<Pages, MemoryError>
where
IntoPages: Into<Pages>,
{
self.vm_memory.from.grow(delta.into())
}
pub(crate) fn from_vm_export(store: &Store, vm_memory: VMMemory) -> Self {
Self {
store: store.clone(),
vm_memory,
}
}
/// Returns whether or not these two memories refer to the same data.
///
/// # Example
///
/// ```
/// # use wasmer::{Memory, MemoryType, Store, Value};
/// # let store = Store::default();
/// #
/// let m = Memory::new(&store, MemoryType::new(1, None, false)).unwrap();
///
/// assert!(m.same(&m));
/// ```
pub fn same(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.vm_memory.from, &other.vm_memory.from)
}
/// Get access to the backing VM value for this extern. This function is for
/// tests it should not be called by users of the Wasmer API.
///
/// # Safety
/// This function is unsafe to call outside of tests for the wasmer crate
/// because there is no stability guarantee for the returned type and we may
/// make breaking changes to it at any time or remove this method.
#[doc(hidden)]
pub unsafe fn get_vm_memory(&self) -> &VMMemory {
&self.vm_memory
self.handle
.get_mut(ctx.as_context_mut().objects_mut())
.grow(delta.into())
}
/// Safely reads bytes from the memory at the given offset.
@@ -208,20 +154,13 @@ impl Memory {
///
/// This method is guaranteed to be safe (from the host side) in the face of
/// concurrent writes.
pub fn read(&self, offset: u64, buf: &mut [u8]) -> Result<(), MemoryAccessError> {
let definition = self.vm_memory.from.vmmemory();
let def = unsafe { definition.as_ref() };
let end = offset
.checked_add(buf.len() as u64)
.ok_or(MemoryAccessError::Overflow)?;
if end > def.current_length.try_into().unwrap() {
return Err(MemoryAccessError::HeapOutOfBounds);
}
unsafe {
volatile_memcpy_read(def.base.add(offset as usize), buf.as_mut_ptr(), buf.len());
}
Ok(())
pub fn read(
&self,
ctx: impl AsContextRef,
offset: u64,
buf: &mut [u8],
) -> Result<(), MemoryAccessError> {
self.buffer(ctx.as_context_ref()).read(offset, buf)
}
/// Safely reads bytes from the memory at the given offset.
@@ -236,23 +175,11 @@ impl Memory {
/// concurrent writes.
pub fn read_uninit<'a>(
&self,
ctx: impl AsContextRef,
offset: u64,
buf: &'a mut [MaybeUninit<u8>],
) -> Result<&'a mut [u8], MemoryAccessError> {
let definition = self.vm_memory.from.vmmemory();
let def = unsafe { definition.as_ref() };
let end = offset
.checked_add(buf.len() as u64)
.ok_or(MemoryAccessError::Overflow)?;
if end > def.current_length.try_into().unwrap() {
return Err(MemoryAccessError::HeapOutOfBounds);
}
let buf_ptr = buf.as_mut_ptr() as *mut u8;
unsafe {
volatile_memcpy_read(def.base.add(offset as usize), buf_ptr, buf.len());
}
Ok(unsafe { slice::from_raw_parts_mut(buf_ptr, buf.len()) })
self.buffer(ctx.as_context_ref()).read_uninit(offset, buf)
}
/// Safely writes bytes to the memory at the given offset.
@@ -262,50 +189,107 @@ impl Memory {
///
/// This method is guaranteed to be safe (from the host side) in the face of
/// concurrent reads/writes.
pub fn write(&self, offset: u64, data: &[u8]) -> Result<(), MemoryAccessError> {
let definition = self.vm_memory.from.vmmemory();
let def = unsafe { definition.as_ref() };
let end = offset
.checked_add(data.len() as u64)
.ok_or(MemoryAccessError::Overflow)?;
if end > def.current_length.try_into().unwrap() {
return Err(MemoryAccessError::HeapOutOfBounds);
}
unsafe {
volatile_memcpy_write(data.as_ptr(), def.base.add(offset as usize), data.len());
}
Ok(())
pub fn write(
&self,
ctx: impl AsContextRef,
offset: u64,
data: &[u8],
) -> Result<(), MemoryAccessError> {
self.buffer(ctx.as_context_ref()).write(offset, data)
}
}
impl Clone for Memory {
fn clone(&self) -> Self {
let mut vm_memory = self.vm_memory.clone();
vm_memory.upgrade_instance_ref().unwrap();
Self {
store: self.store.clone(),
vm_memory,
pub(crate) fn buffer<'a, U>(&'a self, ctx: ContextRef<'a, U>) -> MemoryBuffer<'a> {
let definition = self.handle.get(ctx.objects()).vmmemory();
let def = unsafe { definition.as_ref() };
MemoryBuffer {
base: def.base,
len: def.current_length,
marker: PhantomData,
}
}
pub(crate) fn from_vm_extern(
ctx: impl AsContextRef,
internal: InternalContextHandle<VMMemory>,
) -> Self {
Self {
handle: unsafe {
ContextHandle::from_internal(ctx.as_context_ref().objects().id(), internal)
},
}
}
/// Checks whether this `Memory` can be used with the given context.
pub fn is_from_context(&self, ctx: impl AsContextRef) -> bool {
self.handle.context_id() == ctx.as_context_ref().objects().id()
}
pub(crate) fn to_vm_extern(&self) -> VMExtern {
VMExtern::Memory(self.handle.internal_handle())
}
}
// Allows a `Memory` to be used where a generic `Extern` export is expected.
impl<'a> Exportable<'a> for Memory {
// Converts this memory into an engine-level `Export` value.
// NOTE(review): this body reads `self.vm_memory`, the pre-refactor field;
// in the new `ContextHandle`-based struct this would need updating — verify
// against the final version of this file.
fn to_export(&self) -> Export {
self.vm_memory.clone().into()
}
// Downcasts an `Extern` back to a `&Memory`, failing with
// `ExportError::IncompatibleType` for any other extern kind.
fn get_self_from_extern(_extern: &'a Extern) -> Result<&'a Self, ExportError> {
match _extern {
Extern::Memory(memory) => Ok(memory),
_ => Err(ExportError::IncompatibleType),
}
}
}
fn convert_to_weak_instance_ref(&mut self) {
if let Some(v) = self.vm_memory.instance_ref.as_mut() {
*v = v.downgrade();
/// Underlying buffer for a memory.
///
/// A cheap, `Copy` snapshot of the memory's base pointer and current length,
/// borrowed from the context so the raw pointer cannot dangle.
#[derive(Copy, Clone)]
pub(crate) struct MemoryBuffer<'a> {
// Base address of the linear memory's backing storage.
base: *mut u8,
// Current length of the memory in bytes at the time of the snapshot.
len: usize,
// Ties this buffer's lifetime to both the `Memory` and the
// `ContextObjects` it was derived from, without storing either.
marker: PhantomData<(&'a Memory, &'a ContextObjects)>,
}
impl<'a> MemoryBuffer<'a> {
    /// Copies `buf.len()` bytes starting at `offset` out of the memory into
    /// `buf`.
    ///
    /// Returns `MemoryAccessError::Overflow` if `offset + buf.len()`
    /// overflows, or `MemoryAccessError::HeapOutOfBounds` if the requested
    /// range extends past the end of the memory.
    pub(crate) fn read(&self, offset: u64, buf: &mut [u8]) -> Result<(), MemoryAccessError> {
        let requested_end = offset
            .checked_add(buf.len() as u64)
            .ok_or(MemoryAccessError::Overflow)?;
        let available: u64 = self.len.try_into().unwrap();
        if requested_end > available {
            return Err(MemoryAccessError::HeapOutOfBounds);
        }
        // Range verified in-bounds above; the copy is volatile so concurrent
        // guest writes cannot invalidate it from the host's point of view.
        unsafe {
            volatile_memcpy_read(self.base.add(offset as usize), buf.as_mut_ptr(), buf.len());
        }
        Ok(())
    }

    /// Copies `buf.len()` bytes starting at `offset` into the uninitialized
    /// `buf`, returning the now-initialized slice.
    ///
    /// Returns `MemoryAccessError::Overflow` if `offset + buf.len()`
    /// overflows, or `MemoryAccessError::HeapOutOfBounds` if the requested
    /// range extends past the end of the memory.
    pub(crate) fn read_uninit<'b>(
        &self,
        offset: u64,
        buf: &'b mut [MaybeUninit<u8>],
    ) -> Result<&'b mut [u8], MemoryAccessError> {
        let requested_end = offset
            .checked_add(buf.len() as u64)
            .ok_or(MemoryAccessError::Overflow)?;
        let available: u64 = self.len.try_into().unwrap();
        if requested_end > available {
            return Err(MemoryAccessError::HeapOutOfBounds);
        }
        let out_ptr = buf.as_mut_ptr() as *mut u8;
        unsafe {
            volatile_memcpy_read(self.base.add(offset as usize), out_ptr, buf.len());
        }
        // SAFETY: the volatile copy above wrote every byte of `buf`, so the
        // whole slice is initialized and may be reinterpreted as `[u8]`.
        Ok(unsafe { slice::from_raw_parts_mut(out_ptr, buf.len()) })
    }

    /// Copies all of `data` into the memory starting at `offset`.
    ///
    /// Returns `MemoryAccessError::Overflow` if `offset + data.len()`
    /// overflows, or `MemoryAccessError::HeapOutOfBounds` if the requested
    /// range extends past the end of the memory.
    pub(crate) fn write(&self, offset: u64, data: &[u8]) -> Result<(), MemoryAccessError> {
        let requested_end = offset
            .checked_add(data.len() as u64)
            .ok_or(MemoryAccessError::Overflow)?;
        let available: u64 = self.len.try_into().unwrap();
        if requested_end > available {
            return Err(MemoryAccessError::HeapOutOfBounds);
        }
        // Range verified in-bounds above; volatile write tolerates concurrent
        // guest reads of the same region.
        unsafe {
            volatile_memcpy_write(data.as_ptr(), self.base.add(offset as usize), data.len());
        }
        Ok(())
    }
}