diff --git a/lib/compiler-llvm/src/object_file.rs b/lib/compiler-llvm/src/object_file.rs index 2807c3633..5e5734e97 100644 --- a/lib/compiler-llvm/src/object_file.rs +++ b/lib/compiler-llvm/src/object_file.rs @@ -96,6 +96,30 @@ where libcalls.insert("wasmer_vm_memory32_init".to_string(), LibCall::Memory32Init); libcalls.insert("wasmer_vm_data_drop".to_string(), LibCall::DataDrop); libcalls.insert("wasmer_vm_raise_trap".to_string(), LibCall::RaiseTrap); + libcalls.insert( + "wasmer_vm_memory32_atomic_wait32".to_string(), + LibCall::Memory32AtomicWait32, + ); + libcalls.insert( + "wasmer_vm_imported_memory32_atomic_wait32".to_string(), + LibCall::ImportedMemory32AtomicWait32, + ); + libcalls.insert( + "wasmer_vm_memory32_atomic_wait64".to_string(), + LibCall::Memory32AtomicWait64, + ); + libcalls.insert( + "wasmer_vm_imported_memory32_atomic_wait64".to_string(), + LibCall::ImportedMemory32AtomicWait64, + ); + libcalls.insert( + "wasmer_vm_memory32_atomic_notify".to_string(), + LibCall::Memory32AtomicNotify, + ); + libcalls.insert( + "wasmer_vm_imported_memory32_atomic_notify".to_string(), + LibCall::ImportedMemory32AtomicNotify, + ); let elf = object::File::parse(contents).map_err(map_object_err)?; diff --git a/lib/compiler-llvm/src/translator/code.rs b/lib/compiler-llvm/src/translator/code.rs index 0d372e95e..8dc74b0a5 100644 --- a/lib/compiler-llvm/src/translator/code.rs +++ b/lib/compiler-llvm/src/translator/code.rs @@ -11231,6 +11231,59 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> { .unwrap(); self.state.push1(size); } + Operator::MemoryAtomicWait32 { memarg } => { + let memory_index = MemoryIndex::from_u32(memarg.memory); + let (dst, val, timeout) = self.state.pop3()?; + let wait32_fn_ptr = self.ctx.memory_wait32(memory_index, self.intrinsics); + let callable_func = inkwell::values::CallableValue::try_from(wait32_fn_ptr).unwrap(); + let ret = self.builder.build_call( + callable_func, + &[ + vmctx.as_basic_value_enum().into(), + 
self.intrinsics.i32_ty.const_int(memarg.memory as u64, false).into(), + dst.into(), + val.into(), + timeout.into(), + ], + "", + ); + self.state.push1(ret.try_as_basic_value().left().unwrap()); + } + Operator::MemoryAtomicWait64 { memarg } => { + let memory_index = MemoryIndex::from_u32(memarg.memory); + let (dst, val, timeout) = self.state.pop3()?; + let wait64_fn_ptr = self.ctx.memory_wait64(memory_index, self.intrinsics); + let callable_func = inkwell::values::CallableValue::try_from(wait64_fn_ptr).unwrap(); + let ret = self.builder.build_call( + callable_func, + &[ + vmctx.as_basic_value_enum().into(), + self.intrinsics.i32_ty.const_int(memarg.memory as u64, false).into(), + dst.into(), + val.into(), + timeout.into(), + ], + "", + ); + self.state.push1(ret.try_as_basic_value().left().unwrap()); + } + Operator::MemoryAtomicNotify { memarg } => { + let memory_index = MemoryIndex::from_u32(memarg.memory); + let (dst, count) = self.state.pop2()?; + let notify_fn_ptr = self.ctx.memory_notify(memory_index, self.intrinsics); + let callable_func = inkwell::values::CallableValue::try_from(notify_fn_ptr).unwrap(); + let cnt = self.builder.build_call( + callable_func, + &[ + vmctx.as_basic_value_enum().into(), + self.intrinsics.i32_ty.const_int(memarg.memory as u64, false).into(), + dst.into(), + count.into(), + ], + "", + ); + self.state.push1(cnt.try_as_basic_value().left().unwrap()); + } _ => { return Err(CompileError::Codegen(format!( "Operator {:?} unimplemented", diff --git a/lib/compiler-llvm/src/translator/intrinsics.rs b/lib/compiler-llvm/src/translator/intrinsics.rs index 7a1d1ebb9..d20b4c07b 100644 --- a/lib/compiler-llvm/src/translator/intrinsics.rs +++ b/lib/compiler-llvm/src/translator/intrinsics.rs @@ -240,6 +240,12 @@ pub struct Intrinsics<'ctx> { pub imported_memory_copy: FunctionValue<'ctx>, pub memory_fill: FunctionValue<'ctx>, pub imported_memory_fill: FunctionValue<'ctx>, + pub memory_wait32: FunctionValue<'ctx>, + pub imported_memory_wait32: 
FunctionValue<'ctx>, + pub memory_wait64: FunctionValue<'ctx>, + pub imported_memory_wait64: FunctionValue<'ctx>, + pub memory_notify: FunctionValue<'ctx>, + pub imported_memory_notify: FunctionValue<'ctx>, pub throw_trap: FunctionValue<'ctx>, @@ -256,6 +262,12 @@ pub struct Intrinsics<'ctx> { pub imported_memory32_grow_ptr_ty: PointerType<'ctx>, pub memory32_size_ptr_ty: PointerType<'ctx>, pub imported_memory32_size_ptr_ty: PointerType<'ctx>, + pub memory32_wait32_ptr_ty: PointerType<'ctx>, + pub imported_memory32_wait32_ptr_ty: PointerType<'ctx>, + pub memory32_wait64_ptr_ty: PointerType<'ctx>, + pub imported_memory32_wait64_ptr_ty: PointerType<'ctx>, + pub memory32_notify_ptr_ty: PointerType<'ctx>, + pub imported_memory32_notify_ptr_ty: PointerType<'ctx>, // Pointer to the VM. pub ctx_ptr_ty: PointerType<'ctx>, @@ -1007,6 +1019,86 @@ impl<'ctx> Intrinsics<'ctx> { void_ty.fn_type(&[i32_ty_basic_md], false), None, ), + memory_wait32: module.add_function( + "wasmer_vm_memory32_atomic_wait32", + i32_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i64_ty_basic_md, + ], + false, + ), + None, + ), + imported_memory_wait32: module.add_function( + "wasmer_vm_imported_memory32_atomic_wait32", + i32_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i64_ty_basic_md, + ], + false, + ), + None, + ), + memory_wait64: module.add_function( + "wasmer_vm_memory32_atomic_wait64", + i32_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i64_ty_basic_md, + i64_ty_basic_md, + ], + false, + ), + None, + ), + imported_memory_wait64: module.add_function( + "wasmer_vm_imported_memory32_atomic_wait64", + i32_ty.fn_type( + &[ + ctx_ptr_ty_basic_md, + i32_ty_basic_md, + i32_ty_basic_md, + i64_ty_basic_md, + i64_ty_basic_md, + ], + false, + ), + None, + ), + memory_notify: module.add_function( + "wasmer_vm_memory32_atomic_notify", + i32_ty.fn_type( + &[ + 
ctx_ptr_ty_basic_md,
+                    i32_ty_basic_md,
+                    i32_ty_basic_md, i32_ty_basic_md,
+                ],
+                false,
+            ),
+            None,
+        ),
+        imported_memory_notify: module.add_function(
+            "wasmer_vm_imported_memory32_atomic_notify",
+            i32_ty.fn_type(
+                &[
+                    ctx_ptr_ty_basic_md,
+                    i32_ty_basic_md,
+                    i32_ty_basic_md, i32_ty_basic_md,
+                ],
+                false,
+            ),
+            None,
+        ),
 
         vmfunction_import_ptr_ty: context
             .struct_type(&[i8_ptr_ty_basic, i8_ptr_ty_basic], false)
@@ -1038,6 +1130,24 @@
         imported_memory32_size_ptr_ty: i32_ty
             .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md], false)
             .ptr_type(AddressSpace::Generic),
+        memory32_wait32_ptr_ty: i32_ty
+            .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i64_ty_basic_md], false)
+            .ptr_type(AddressSpace::Generic),
+        imported_memory32_wait32_ptr_ty: i32_ty
+            .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i64_ty_basic_md], false)
+            .ptr_type(AddressSpace::Generic),
+        memory32_wait64_ptr_ty: i32_ty
+            .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i64_ty_basic_md, i64_ty_basic_md], false)
+            .ptr_type(AddressSpace::Generic),
+        imported_memory32_wait64_ptr_ty: i32_ty
+            .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i64_ty_basic_md, i64_ty_basic_md], false)
+            .ptr_type(AddressSpace::Generic),
+        memory32_notify_ptr_ty: i32_ty
+            .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md], false)
+            .ptr_type(AddressSpace::Generic),
+        imported_memory32_notify_ptr_ty: i32_ty
+            .fn_type(&[ctx_ptr_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md, i32_ty_basic_md], false)
+            .ptr_type(AddressSpace::Generic),
 
         ctx_ptr_ty,
     };
@@ -1658,6 +1768,115 @@ impl<'ctx, 'a> CtxType<'ctx, 'a> {
         })
     }
 
+    pub fn memory_wait32(
+        &mut self,
+        memory_index: MemoryIndex,
+        intrinsics: &Intrinsics<'ctx>,
+    ) -> PointerValue<'ctx> {
+        // NOTE: deliberately not cached. `cached_memory_size` is keyed only by
+        // `MemoryIndex` and is shared with `memory_size`, so stashing the
+        // wait/notify builtin pointers in it would make whichever pointer was
+        // inserted first (e.g. the `memory.size` libcall) be returned - and
+        // then called with the wrong signature - on every later lookup.
+        let (wait_fn, wait_fn_ty) = if self.wasm_module.local_memory_index(memory_index).is_some() {
+            (
+                VMBuiltinFunctionIndex::get_memory_atomic_wait32_index(),
+                intrinsics.memory32_wait32_ptr_ty,
+            )
+        } else {
+            (
+                VMBuiltinFunctionIndex::get_imported_memory_atomic_wait32_index(),
+                intrinsics.imported_memory32_wait32_ptr_ty,
+            )
+        };
+        let offset = self.offsets.vmctx_builtin_function(wait_fn);
+        let offset = intrinsics.i32_ty.const_int(offset.into(), false);
+        let wait_fn_ptr_ptr = unsafe { self.cache_builder.build_gep(self.ctx_ptr_value, &[offset], "") };
+
+        let wait_fn_ptr_ptr = self
+            .cache_builder
+            .build_bitcast(
+                wait_fn_ptr_ptr,
+                wait_fn_ty.ptr_type(AddressSpace::Generic),
+                "",
+            )
+            .into_pointer_value();
+
+        self.cache_builder
+            .build_load(wait_fn_ptr_ptr, "")
+            .into_pointer_value()
+    }
+
+    pub fn memory_wait64(
+        &mut self,
+        memory_index: MemoryIndex,
+        intrinsics: &Intrinsics<'ctx>,
+    ) -> PointerValue<'ctx> {
+        // Not cached; `cached_memory_size` must stay reserved for `memory_size`.
+        let (wait_fn, wait_fn_ty) = if self.wasm_module.local_memory_index(memory_index).is_some() {
+            (
+                VMBuiltinFunctionIndex::get_memory_atomic_wait64_index(),
+                intrinsics.memory32_wait64_ptr_ty,
+            )
+        } else {
+            (
+                VMBuiltinFunctionIndex::get_imported_memory_atomic_wait64_index(),
+                intrinsics.imported_memory32_wait64_ptr_ty,
+            )
+        };
+        let offset = self.offsets.vmctx_builtin_function(wait_fn);
+        let offset = intrinsics.i32_ty.const_int(offset.into(), false);
+        let wait_fn_ptr_ptr = unsafe { self.cache_builder.build_gep(self.ctx_ptr_value, &[offset], "") };
+
+        let wait_fn_ptr_ptr = self
+            .cache_builder
+            .build_bitcast(
+                wait_fn_ptr_ptr,
+                wait_fn_ty.ptr_type(AddressSpace::Generic),
+                "",
+            )
+            .into_pointer_value();
+
+        self.cache_builder
+            .build_load(wait_fn_ptr_ptr, "")
+            .into_pointer_value()
+    }
+
+    pub fn memory_notify(
+        &mut self,
+        memory_index: MemoryIndex,
+        intrinsics: &Intrinsics<'ctx>,
+    ) -> PointerValue<'ctx> {
+        // Not cached; `cached_memory_size` must stay reserved for `memory_size`.
+        let (notify_fn, notify_fn_ty) = if self.wasm_module.local_memory_index(memory_index).is_some() {
+            (
+                VMBuiltinFunctionIndex::get_memory_atomic_notify_index(),
+                intrinsics.memory32_notify_ptr_ty,
+            )
+        } else {
+            (
+                VMBuiltinFunctionIndex::get_imported_memory_atomic_notify_index(),
+                intrinsics.imported_memory32_notify_ptr_ty,
+            )
+        };
+        let offset = self.offsets.vmctx_builtin_function(notify_fn);
+        let offset = intrinsics.i32_ty.const_int(offset.into(), false);
+        let notify_fn_ptr_ptr = unsafe { self.cache_builder.build_gep(self.ctx_ptr_value, &[offset], "") };
+
+        let notify_fn_ptr_ptr = self
+            .cache_builder
+            .build_bitcast(
+                notify_fn_ptr_ptr,
+                notify_fn_ty.ptr_type(AddressSpace::Generic),
+                "",
+            )
+            .into_pointer_value();
+
+        self.cache_builder
+            .build_load(notify_fn_ptr_ptr, "")
+            .into_pointer_value()
+    }
 
     pub fn get_offsets(&self) -> &VMOffsets {
         &self.offsets
     }