In engine-jit, load code for each artifact contiguously.

commit c6bec2f23a
parent c6e4563b08
Author: Nick Lewycky
Date:   2020-09-16 12:53:30 -07:00
5 changed files with 225 additions and 308 deletions
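The gist of the change, as a minimal standalone sketch (the `layout` helper and its parameters are illustrative, not the engine's API): all function bodies and executable sections are packed into a single mmap at 16-byte alignment, the running offset is then rounded up to a page boundary so the trailing read-only data sections can get different page permissions, and the data sections are packed at 64-byte alignment after that.

    /// Sketch of the contiguous-layout arithmetic introduced below.
    fn round_up(size: usize, multiple: usize) -> usize {
        debug_assert!(multiple.is_power_of_two());
        (size + (multiple - 1)) & !(multiple - 1)
    }

    /// Total bytes needed for items of the given sizes (illustrative helper).
    fn layout(funcs: &[usize], exec: &[usize], data: &[usize], page: usize) -> usize {
        let code_end = funcs
            .iter()
            .chain(exec.iter())
            .fold(0, |acc, len| round_up(acc + len, 16));
        // Data starts on a fresh page so its permissions can differ from the code's.
        data.iter()
            .fold(round_up(code_end, page), |acc, len| round_up(acc + len, 64))
    }

    fn main() {
        // Two functions (60 and 100 bytes), one 40-byte executable section and
        // one 10-byte data section: one 4096-byte code page plus 64 data bytes.
        assert_eq!(layout(&[60, 100], &[40], &[10], 4096), 4096 + 64);
    }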


@@ -155,15 +155,15 @@ impl JITArtifact {
finished_functions,
_finished_function_call_trampolines,
finished_dynamic_function_trampolines,
+        custom_sections,
) = inner_jit.allocate(
&mut unwind_registry,
&serializable.compile_info.module,
&serializable.compilation.function_bodies,
&serializable.compilation.function_call_trampolines,
&serializable.compilation.dynamic_function_trampolines,
+        &serializable.compilation.custom_sections,
)?;
-    let custom_sections =
-        inner_jit.allocate_custom_sections(&serializable.compilation.custom_sections)?;
link_module(
&serializable.compile_info.module,
@@ -194,7 +194,7 @@ impl JITArtifact {
.len();
let eh_frame_section_pointer = custom_sections[debug.eh_frame];
Some(unsafe {
-            std::slice::from_raw_parts(eh_frame_section_pointer, eh_frame_section_size)
+            std::slice::from_raw_parts(*eh_frame_section_pointer, eh_frame_section_size)
})
}
None => None,


@@ -3,12 +3,9 @@
//! Memory management for executable code.
use crate::unwind::UnwindRegistry;
-use std::mem::ManuallyDrop;
use std::sync::Arc;
-use std::{cmp, mem};
-use wasmer_compiler::{CompiledFunctionUnwindInfo, FunctionBody, SectionBody};
-use wasmer_types::entity::{EntityRef, PrimaryMap};
-use wasmer_vm::{FunctionBodyPtr, Mmap, VMFunctionBody};
+use wasmer_compiler::{CompiledFunctionUnwindInfo, CustomSection, FunctionBody};
+use wasmer_vm::{Mmap, VMFunctionBody};
/// The optimal alignment for functions.
///
@@ -17,193 +14,145 @@ use wasmer_vm::{FunctionBodyPtr, Mmap, VMFunctionBody};
/// optimal alignment values.
const ARCH_FUNCTION_ALIGNMENT: usize = 16;
-struct CodeMemoryEntry {
-    mmap: ManuallyDrop<Mmap>,
-}
-
-impl CodeMemoryEntry {
-    fn new() -> Self {
-        let mmap = ManuallyDrop::new(Mmap::new());
-        Self { mmap }
-    }
-    fn with_capacity(cap: usize) -> Result<Self, String> {
-        let mmap = ManuallyDrop::new(Mmap::with_at_least(cap)?);
-        Ok(Self { mmap })
-    }
-}
-
-impl Drop for CodeMemoryEntry {
-    fn drop(&mut self) {
-        unsafe {
-            ManuallyDrop::drop(&mut self.mmap);
-        }
-    }
-}
+/// The optimal alignment for data.
+///
+const DATA_SECTION_ALIGNMENT: usize = 64;

/// Memory manager for executable code.
pub struct CodeMemory {
-    current: CodeMemoryEntry,
-    entries: Vec<CodeMemoryEntry>,
    unwind_registries: Vec<Arc<UnwindRegistry>>,
-    read_sections: Vec<Vec<u8>>,
-    position: usize,
-    published: usize,
+    mmap: Mmap,
+    start_of_nonexecutable_pages: usize,
}

impl CodeMemory {
    /// Create a new `CodeMemory` instance.
    pub fn new() -> Self {
        Self {
-            current: CodeMemoryEntry::new(),
-            entries: Vec::new(),
-            read_sections: Vec::new(),
            unwind_registries: Vec::new(),
-            position: 0,
-            published: 0,
+            mmap: Mmap::new(),
+            start_of_nonexecutable_pages: 0,
        }
    }
/// Allocate a single contiguous block of memory for the functions and custom sections, and copy the data in place.
pub fn allocate(
&mut self,
registry: &mut UnwindRegistry,
functions: &[&FunctionBody],
executable_sections: &[&CustomSection],
data_sections: &[&CustomSection],
) -> Result<(Vec<&mut [VMFunctionBody]>, Vec<&mut [u8]>, Vec<&mut [u8]>), String> {
let mut function_result = vec![];
let mut data_section_result = vec![];
let mut executable_section_result = vec![];
let page_size = region::page::size();
// 1. Calculate the total size, that is:
// - function body size, including all trampolines
// -- windows unwind info
// -- padding between functions
// - executable section body
// -- padding between executable sections
// - padding until a new page to change page permissions
// - data section body size
// -- padding between data sections
let total_len = round_up(
functions.iter().fold(0, |acc, func| {
round_up(
acc + Self::function_allocation_size(func),
ARCH_FUNCTION_ALIGNMENT,
)
}) + executable_sections.iter().fold(0, |acc, exec| {
round_up(acc + exec.bytes.len(), ARCH_FUNCTION_ALIGNMENT)
}),
page_size,
) + data_sections.iter().fold(0, |acc, data| {
round_up(acc + data.bytes.len(), DATA_SECTION_ALIGNMENT)
});
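        // Worked example with illustrative numbers: page_size = 4096, two
        // function allocation sizes of 60 and 100 bytes (the 16-byte-aligned
        // running total is 64, then 176), one 40-byte executable section
        // (total 224, which rounds up to one full 4096-byte page), and one
        // 10-byte data section (round_up(10, 64) = 64) give total_len = 4160.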
// 2. Allocate the pages. Mark them all read-write.
self.mmap = Mmap::with_at_least(total_len)?;
// 3. Determine where the pointers to each function, executable section
// or data section are. Copy the functions. Collect the addresses of each and return them.
let mut bytes = 0;
let mut buf = self.mmap.as_mut_slice();
for func in functions {
let len = round_up(
Self::function_allocation_size(func),
ARCH_FUNCTION_ALIGNMENT,
);
let (func_buf, next_buf) = buf.split_at_mut(len);
buf = next_buf;
bytes += len;
let vmfunc = Self::copy_function(registry, func, func_buf);
assert!(vmfunc as *mut _ as *mut u8 as usize % ARCH_FUNCTION_ALIGNMENT == 0);
function_result.push(vmfunc);
}
for section in executable_sections {
let section = &section.bytes;
assert!(buf.as_mut_ptr() as *mut _ as *mut u8 as usize % ARCH_FUNCTION_ALIGNMENT == 0);
let len = round_up(section.len(), ARCH_FUNCTION_ALIGNMENT);
let (s, next_buf) = buf.split_at_mut(len);
buf = next_buf;
bytes += len;
s[..section.len()].copy_from_slice(section.as_slice());
executable_section_result.push(s);
}
self.start_of_nonexecutable_pages = bytes;
if !data_sections.is_empty() {
// Data sections have different page permissions from the executable
// code that came before it, so they need to be on different pages.
let padding = round_up(bytes, page_size) - bytes;
buf = buf.split_at_mut(padding).1;
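            // E.g. (illustrative): if code ends at bytes = 9000 with 4096-byte
            // pages, padding = 12288 - 9000 = 3288, so the first data section
            // starts on the fresh page at offset 12288.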
for section in data_sections {
let section = &section.bytes;
assert!(
buf.as_mut_ptr() as *mut _ as *mut u8 as usize % DATA_SECTION_ALIGNMENT == 0
);
let len = round_up(section.len(), DATA_SECTION_ALIGNMENT);
let (s, next_buf) = buf.split_at_mut(len);
buf = next_buf;
s[..section.len()].copy_from_slice(section.as_slice());
data_section_result.push(s);
}
}
Ok((
function_result,
executable_section_result,
data_section_result,
))
}
/// Publish the unwind registry into code memory.
pub(crate) fn publish_unwind_registry(&mut self, unwind_registry: Arc<UnwindRegistry>) {
self.unwind_registries.push(unwind_registry);
}
/// Allocate a continuous memory block for a compilation.
///
/// Allocates memory for both the function bodies as well as function unwind data.
pub fn allocate_functions<K>(
&mut self,
registry: &mut UnwindRegistry,
compilation: &PrimaryMap<K, FunctionBody>,
) -> Result<PrimaryMap<K, FunctionBodyPtr>, String>
where
K: EntityRef,
{
let total_len = compilation.values().fold(0, |acc, func| {
acc + get_align_padding_size(acc, ARCH_FUNCTION_ALIGNMENT)
+ Self::function_allocation_size(func)
});
let (mut buf, start) = self.allocate(total_len, ARCH_FUNCTION_ALIGNMENT)?;
let base_address = buf.as_ptr() as usize - start;
let mut result = PrimaryMap::with_capacity(compilation.len());
let mut start = start as u32;
let mut padding = 0usize;
for func in compilation.values() {
let (next_start, next_buf, vmfunc) = Self::copy_function(
registry,
base_address,
func,
start + padding as u32,
&mut buf[padding..],
);
assert!(vmfunc as *mut _ as *mut u8 as usize % ARCH_FUNCTION_ALIGNMENT == 0);
result.push(FunctionBodyPtr(vmfunc as *mut [VMFunctionBody]));
padding = get_align_padding_size(next_start as usize, ARCH_FUNCTION_ALIGNMENT);
start = next_start;
buf = next_buf;
}
Ok(result)
}
/// Allocate a continuous memory block for a single compiled function.
/// TODO: Reorganize the code that calls this to emit code directly into the
/// mmap region rather than into a Vec that we need to copy in.
pub fn allocate_for_function(
&mut self,
registry: &mut UnwindRegistry,
func: &FunctionBody,
) -> Result<&mut [VMFunctionBody], String> {
let size = Self::function_allocation_size(func);
let (buf, start) = self.allocate(size, ARCH_FUNCTION_ALIGNMENT)?;
let base_address = buf.as_ptr() as usize - start;
let (_, _, vmfunc) = Self::copy_function(registry, base_address, func, start as u32, buf);
assert!(vmfunc as *mut _ as *mut u8 as usize % ARCH_FUNCTION_ALIGNMENT == 0);
Ok(vmfunc)
}
/// Allocate a continuous memory block for an executable custom section.
pub fn allocate_for_executable_custom_section(
&mut self,
section: &SectionBody,
) -> Result<&mut [u8], String> {
let section = section.as_slice();
let (buf, _) = self.allocate(section.len(), ARCH_FUNCTION_ALIGNMENT)?;
buf.copy_from_slice(section);
Ok(buf)
}
/// Allocate a continuous memory block for a readable custom section.
pub fn allocate_for_custom_section(
&mut self,
section: &SectionBody,
) -> Result<&mut [u8], String> {
let section = section.as_slice().to_vec();
self.read_sections.push(section);
Ok(self
.read_sections
.last_mut()
.ok_or_else(|| "Can't get last section".to_string())?)
}
-    /// Make all allocated memory executable.
+    /// Apply the page permissions.
    pub fn publish(&mut self) {
-        self.push_current(0)
-            .expect("failed to push current memory map");
-
-        for CodeMemoryEntry { mmap: m } in &mut self.entries[self.published..] {
-            if !m.is_empty() {
-                unsafe {
-                    region::protect(m.as_mut_ptr(), m.len(), region::Protection::READ_EXECUTE)
-                }
-                .expect("unable to make memory readonly and executable");
-            }
-        }
-
-        self.published = self.entries.len();
-    }
-
-    /// Allocate `size` bytes of memory which can be made executable later by
-    /// calling `publish()`. Note that we allocate the memory as writeable so
-    /// that it can be written to and patched, though we make it readonly before
-    /// actually executing from it.
-    ///
-    /// A few values are returned:
-    ///
-    /// * A mutable slice which references the allocated memory
-    /// * A function table instance where unwind information is registered
-    /// * The offset within the current mmap that the slice starts at
-    fn allocate(&mut self, size: usize, alignment: usize) -> Result<(&mut [u8], usize), String> {
-        assert!(alignment > 0);
-        let align_padding = get_align_padding_size(self.position, alignment);
-        let padded_size = size + align_padding;
-        let old_position;
-        if self.current.mmap.len() - self.position < padded_size {
-            // If we are allocating a new region, then it is already aligned to page boundary - no need to apply padding here.
-            self.push_current(cmp::max(0x10000, size))?;
-            old_position = 0;
-            self.position += size;
-        } else {
-            // Otherwise, apply padding.
-            old_position = self.position + align_padding;
-            self.position += padded_size;
-        }
-        assert!(old_position % alignment == 0);
-        Ok((
-            &mut self.current.mmap.as_mut_slice()[old_position..self.position],
-            old_position,
-        ))
+        if self.mmap.is_empty() || self.start_of_nonexecutable_pages == 0 {
+            return;
+        }
+        assert!(self.mmap.len() >= self.start_of_nonexecutable_pages);
+        unsafe {
+            region::protect(
+                self.mmap.as_mut_ptr(),
+                self.start_of_nonexecutable_pages,
+                region::Protection::READ_EXECUTE,
+            )
+        }
+        .expect("unable to make memory readonly and executable");
    }
/// Calculates the allocation size of the given compiled function.
@@ -224,41 +173,35 @@ impl CodeMemory {
/// This will also add the function to the current function table.
    fn copy_function<'a>(
        registry: &mut UnwindRegistry,
-        base_address: usize,
        func: &FunctionBody,
-        func_start: u32,
        buf: &'a mut [u8],
-    ) -> (u32, &'a mut [u8], &'a mut [VMFunctionBody]) {
-        assert!((func_start as usize) % ARCH_FUNCTION_ALIGNMENT == 0);
+    ) -> &'a mut [VMFunctionBody] {
+        assert!((buf.as_ptr() as usize) % ARCH_FUNCTION_ALIGNMENT == 0);
        let func_len = func.body.len();
-        let mut func_end = func_start + (func_len as u32);
-        let (body, mut remainder) = buf.split_at_mut(func_len);
+        let (body, remainder) = buf.split_at_mut(func_len);
        body.copy_from_slice(&func.body);
        let vmfunc = Self::view_as_mut_vmfunc_slice(body);
        if let Some(CompiledFunctionUnwindInfo::WindowsX64(info)) = &func.unwind_info {
            // Windows unwind information is written following the function body
            // Keep unwind information 32-bit aligned (round up to the nearest 4 byte boundary)
-            let unwind_start = (func_end + 3) & !3;
+            let unwind_start = (func_len + 3) & !3;
            let unwind_size = info.len();
-            let padding = (unwind_start - func_end) as usize;
-            assert_eq!((func_start as usize + func_len + padding) % 4, 0);
-            let (slice, r) = remainder.split_at_mut(padding + unwind_size);
+            let padding = unwind_start - func_len;
+            assert_eq!((func_len + padding) % 4, 0);
+            let slice = remainder.split_at_mut(padding + unwind_size).0;
            slice[padding..].copy_from_slice(&info);
-            // println!("Info {:?} (func_len: {}, padded: {})", info, func_len, padding);
-            func_end = unwind_start + (unwind_size as u32);
-            remainder = r;
        }
        if let Some(info) = &func.unwind_info {
            registry
-                .register(base_address, func_start, func_len as u32, info)
+                .register(vmfunc.as_ptr() as usize, 0, func_len as u32, info)
                .expect("failed to register unwind information");
        }
-        (func_end, remainder, vmfunc)
+        vmfunc
    }
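    // Worked example for the unwind padding above (illustrative numbers): with
    // func_len = 13, unwind_start = (13 + 3) & !3 = 16 and padding = 3, so the
    // Windows unwind record begins at the next 4-byte boundary after the body;
    // with func_len = 16, padding = 0 and the record directly follows the body.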
    /// Convert a mutable slice from u8 to VMFunctionBody.
@@ -267,34 +210,11 @@ impl CodeMemory {
let body_ptr = byte_ptr as *mut [VMFunctionBody];
unsafe { &mut *body_ptr }
}
-    /// Pushes the current Mmap and allocates a new Mmap of the given size.
-    fn push_current(&mut self, new_size: usize) -> Result<(), String> {
-        let previous = mem::replace(
-            &mut self.current,
-            if new_size == 0 {
-                CodeMemoryEntry::new()
-            } else {
-                CodeMemoryEntry::with_capacity(cmp::max(0x10000, new_size))?
-            },
-        );
-        if !previous.mmap.is_empty() {
-            self.entries.push(previous);
-        }
-        self.position = 0;
-        Ok(())
-    }
}
-/// Calculates the minimum number of padding bytes required to fulfill `alignment`.
-fn get_align_padding_size(position: usize, alignment: usize) -> usize {
-    match position % alignment {
-        0 => 0,
-        x => alignment - x,
-    }
-}
+fn round_up(size: usize, multiple: usize) -> usize {
+    debug_assert!(multiple.is_power_of_two());
+    (size + (multiple - 1)) & !(multiple - 1)
+}
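// Illustrative values (not from the commit): round_up(1, 16) = 16,
// round_up(164, 16) = 176, round_up(4097, 4096) = 8192. The debug_assert
// matters: the bit trick is only correct for power-of-two multiples; e.g.
// multiple = 24 would compute a wrong result.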
#[cfg(test)]


@@ -14,8 +14,8 @@ use wasmer_types::entity::PrimaryMap;
use wasmer_types::Features;
use wasmer_types::{FunctionIndex, FunctionType, LocalFunctionIndex, SignatureIndex};
use wasmer_vm::{
-    FunctionBodyPtr, ModuleInfo, SignatureRegistry, VMFunctionBody, VMSharedSignatureIndex,
-    VMTrampoline,
+    FunctionBodyPtr, ModuleInfo, SectionBodyPtr, SignatureRegistry, VMFunctionBody,
+    VMSharedSignatureIndex, VMTrampoline,
};
/// A WebAssembly `JIT` Engine.
@@ -35,7 +35,7 @@ impl JITEngine {
inner: Arc::new(Mutex::new(JITEngineInner {
compiler: Some(compiler),
function_call_trampolines: HashMap::new(),
-                code_memory: CodeMemory::new(),
+                code_memory: vec![],
signatures: SignatureRegistry::new(),
features,
})),
@@ -63,7 +63,7 @@ impl JITEngine {
#[cfg(feature = "compiler")]
compiler: None,
function_call_trampolines: HashMap::new(),
-                code_memory: CodeMemory::new(),
+                code_memory: vec![],
signatures: SignatureRegistry::new(),
features: Features::default(),
})),
@@ -157,7 +157,7 @@ pub struct JITEngineInner {
features: Features,
/// The code memory is responsible of publishing the compiled
/// functions to memory.
-    code_memory: CodeMemory,
+    code_memory: Vec<CodeMemory>,
/// The signature registry is used mainly to operate with trampolines
/// performantly.
signatures: SignatureRegistry,
@@ -193,38 +193,6 @@ impl JITEngineInner {
&self.features
}
-    /// Allocate custom sections into memory
-    pub(crate) fn allocate_custom_sections(
-        &mut self,
-        custom_sections: &PrimaryMap<SectionIndex, CustomSection>,
-    ) -> Result<PrimaryMap<SectionIndex, *const u8>, CompileError> {
-        let mut result = PrimaryMap::with_capacity(custom_sections.len());
-        for (_, section) in custom_sections.iter() {
-            let buffer: &[u8] = match section.protection {
-                CustomSectionProtection::Read => self
-                    .code_memory
-                    .allocate_for_custom_section(&section.bytes)
-                    .map_err(|message| {
-                        CompileError::Resource(format!(
-                            "failed to allocate readable memory for custom section: {}",
-                            message
-                        ))
-                    })?,
-                CustomSectionProtection::ReadExecute => self
-                    .code_memory
-                    .allocate_for_executable_custom_section(&section.bytes)
-                    .map_err(|message| {
-                        CompileError::Resource(format!(
-                            "failed to allocate executable memory for custom section: {}",
-                            message
-                        ))
-                    })?,
-            };
-            result.push(buffer.as_ptr());
-        }
-        Ok(result)
-    }
/// Allocate compiled functions into memory
#[allow(clippy::type_complexity)]
pub(crate) fn allocate(
@@ -234,89 +202,105 @@ impl JITEngineInner {
        functions: &PrimaryMap<LocalFunctionIndex, FunctionBody>,
        function_call_trampolines: &PrimaryMap<SignatureIndex, FunctionBody>,
        dynamic_function_trampolines: &PrimaryMap<FunctionIndex, FunctionBody>,
+        custom_sections: &PrimaryMap<SectionIndex, CustomSection>,
    ) -> Result<
        (
            PrimaryMap<LocalFunctionIndex, FunctionBodyPtr>,
            PrimaryMap<SignatureIndex, FunctionBodyPtr>,
            PrimaryMap<FunctionIndex, FunctionBodyPtr>,
+            PrimaryMap<SectionIndex, SectionBodyPtr>,
        ),
        CompileError,
    > {
-        // Allocate all of the compiled functions into executable memory,
-        // copying over their contents.
-        let allocated_functions = self
-            .code_memory
-            .allocate_functions(registry, &functions)
-            .map_err(|message| {
-                CompileError::Resource(format!(
-                    "failed to allocate memory for functions: {}",
-                    message
-                ))
-            })?;
-        let mut allocated_function_call_trampolines: PrimaryMap<SignatureIndex, FunctionBodyPtr> =
-            PrimaryMap::new();
-        for (sig_index, compiled_function) in function_call_trampolines.iter() {
-            let func_type = module.signatures.get(sig_index).unwrap();
-            let index = self.signatures.register(&func_type);
-            let ptr = self
-                .code_memory
-                .allocate_for_function(registry, &compiled_function)
-                .map_err(|message| {
-                    CompileError::Resource(format!(
-                        "failed to allocate memory for function call trampolines: {}",
-                        message
-                    ))
-                })?;
-            allocated_function_call_trampolines.push(FunctionBodyPtr(ptr));
-            let trampoline =
-                unsafe { std::mem::transmute::<*const VMFunctionBody, VMTrampoline>(ptr.as_ptr()) };
-            self.function_call_trampolines.insert(index, trampoline);
-        }
-        let allocated_dynamic_function_trampolines = dynamic_function_trampolines
-            .values()
-            .map(|compiled_function| {
-                let ptr = self
-                    .code_memory
-                    .allocate_for_function(registry, &compiled_function)
-                    .map_err(|message| {
-                        CompileError::Resource(format!(
-                            "failed to allocate memory for dynamic function trampolines: {}",
-                            message
-                        ))
-                    })?;
-                Ok(FunctionBodyPtr(ptr as _))
-            })
-            .collect::<Result<PrimaryMap<FunctionIndex, _>, CompileError>>()?;
-        Ok((
-            allocated_functions,
-            allocated_function_call_trampolines,
-            allocated_dynamic_function_trampolines,
-        ))
+        let function_bodies = functions
+            .values()
+            .chain(function_call_trampolines.values())
+            .chain(dynamic_function_trampolines.values())
+            .collect::<Vec<_>>();
+        let (executable_sections, data_sections): (Vec<_>, _) = custom_sections
+            .values()
+            .partition(|section| section.protection == CustomSectionProtection::ReadExecute);
+        self.code_memory.push(CodeMemory::new());
+
+        let (mut allocated_functions, allocated_executable_sections, allocated_data_sections) =
+            self.code_memory
+                .last_mut()
+                .unwrap()
+                .allocate(
+                    registry,
+                    function_bodies.as_slice(),
+                    executable_sections.as_slice(),
+                    data_sections.as_slice(),
+                )
+                .map_err(|message| {
+                    CompileError::Resource(format!(
+                        "failed to allocate memory for functions: {}",
+                        message
+                    ))
+                })?;
+
+        let allocated_functions_result = allocated_functions
+            .drain(0..functions.len())
+            .map(|slice| FunctionBodyPtr(slice as *mut [_]))
+            .collect::<PrimaryMap<LocalFunctionIndex, _>>();
+
+        let mut allocated_function_call_trampolines: PrimaryMap<SignatureIndex, FunctionBodyPtr> =
+            PrimaryMap::new();
+        // let (indices, compiled_functions): (Vec<VMSharedSignatureIndex>, PrimaryMap<FunctionIndex, FunctionBody>) = function_call_trampolines.iter().map(|(sig_index, compiled_function)| {
+        //     let func_type = module.signatures.get(sig_index).unwrap();
+        //     let index = self.signatures.register(&func_type);
+        //     (index, compiled_function)
+        // }).filter(|(index, _)| {
+        //     !self.function_call_trampolines.contains_key(index)
+        // }).unzip();
+        for (sig_index, _) in function_call_trampolines.iter() {
+            let func_type = module.signatures.get(sig_index).unwrap();
+            let index = self.signatures.register(&func_type);
+            // if self.function_call_trampolines.contains_key(&index) {
+            //     // We don't need to allocate the trampoline in case
+            //     // its signature is already allocated.
+            //     continue;
+            // }
+            let ptr = allocated_functions
+                .drain(0..1)
+                .map(|slice| FunctionBodyPtr(slice as *mut [_]))
+                .collect::<Vec<_>>()[0];
+            allocated_function_call_trampolines.push(ptr);
+            let trampoline = unsafe {
+                std::mem::transmute::<*const VMFunctionBody, VMTrampoline>((**ptr).as_ptr())
+            };
+            self.function_call_trampolines.insert(index, trampoline);
+        }
+
+        let allocated_dynamic_function_trampolines = allocated_functions
+            .drain(..)
+            .map(|slice| FunctionBodyPtr(slice as *mut [_]))
+            .collect::<PrimaryMap<FunctionIndex, _>>();
+
+        let mut exec_iter = allocated_executable_sections.iter();
+        let mut data_iter = allocated_data_sections.iter();
+        let allocated_custom_sections = custom_sections
+            .iter()
+            .map(|(_, section)| {
+                SectionBodyPtr(
+                    if section.protection == CustomSectionProtection::ReadExecute {
+                        exec_iter.next()
+                    } else {
+                        data_iter.next()
+                    }
+                    .unwrap()
+                    .as_ptr(),
+                )
+            })
+            .collect::<PrimaryMap<SectionIndex, _>>();
+
+        Ok((
+            allocated_functions_result,
+            allocated_function_call_trampolines,
+            allocated_dynamic_function_trampolines,
+            allocated_custom_sections,
+        ))
    }
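    // The partition-then-reinterleave pattern above, in isolation (illustrative
    // sketch, not engine code): `partition` keeps relative order within each
    // half, so a second pass in the original order can pull from the two result
    // iterators to line every element back up with its allocation.
    //
    //     let (evens, odds): (Vec<i32>, Vec<i32>) = (0..6).partition(|n| n % 2 == 0);
    //     let (mut e, mut o) = (evens.iter(), odds.iter());
    //     let rebuilt: Vec<i32> = (0..6)
    //         .map(|n| *(if n % 2 == 0 { e.next() } else { o.next() }.unwrap()))
    //         .collect();
    //     assert_eq!(rebuilt, (0..6).collect::<Vec<i32>>());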
/// Make memory containing compiled code executable.
pub(crate) fn publish_compiled_code(&mut self) {
-        self.code_memory.publish();
+        self.code_memory.last_mut().unwrap().publish();
}
/// Publish the unwind registry into code memory.
pub(crate) fn publish_unwind_registry(&mut self, unwind_registry: Arc<UnwindRegistry>) {
-        self.code_memory.publish_unwind_registry(unwind_registry);
+        self.code_memory
+            .last_mut()
+            .unwrap()
+            .publish_unwind_registry(unwind_registry);
}
/// Shared signature registry.


@@ -8,14 +8,14 @@ use wasmer_compiler::{
use wasmer_types::entity::{EntityRef, PrimaryMap};
use wasmer_types::LocalFunctionIndex;
use wasmer_vm::ModuleInfo;
-use wasmer_vm::{FunctionBodyPtr, VMFunctionBody};
+use wasmer_vm::{FunctionBodyPtr, SectionBodyPtr, VMFunctionBody};
fn apply_relocation(
body: usize,
r: &Relocation,
allocated_functions: &PrimaryMap<LocalFunctionIndex, FunctionBodyPtr>,
jt_offsets: &PrimaryMap<LocalFunctionIndex, JumpTableOffsets>,
-    allocated_sections: &PrimaryMap<SectionIndex, *const u8>,
+    allocated_sections: &PrimaryMap<SectionIndex, SectionBodyPtr>,
) {
let target_func_address: usize = match r.reloc_target {
RelocationTarget::LocalFunc(index) => {
@@ -24,7 +24,7 @@ fn apply_relocation(
}
RelocationTarget::LibCall(libcall) => libcall.function_pointer(),
RelocationTarget::CustomSection(custom_section) => {
-            allocated_sections[custom_section] as usize
+            *allocated_sections[custom_section] as usize
}
RelocationTarget::JumpTable(func_index, jt) => {
let offset = *jt_offsets
@@ -72,11 +72,11 @@ pub fn link_module(
allocated_functions: &PrimaryMap<LocalFunctionIndex, FunctionBodyPtr>,
jt_offsets: &PrimaryMap<LocalFunctionIndex, JumpTableOffsets>,
function_relocations: Relocations,
-    allocated_sections: &PrimaryMap<SectionIndex, *const u8>,
+    allocated_sections: &PrimaryMap<SectionIndex, SectionBodyPtr>,
section_relocations: &PrimaryMap<SectionIndex, Vec<Relocation>>,
) {
for (i, section_relocs) in section_relocations.iter() {
-        let body = allocated_sections[i] as usize;
+        let body = *allocated_sections[i] as usize;
for r in section_relocs {
apply_relocation(body, r, allocated_functions, jt_offsets, allocated_sections);
}


@@ -77,3 +77,16 @@ unsafe impl Send for FunctionBodyPtr {}
/// # Safety
/// TODO:
unsafe impl Sync for FunctionBodyPtr {}
/// Pointers to section data.
#[derive(Clone, Copy, Debug)]
#[repr(transparent)]
pub struct SectionBodyPtr(pub *const u8);
impl std::ops::Deref for SectionBodyPtr {
type Target = *const u8;
fn deref(&self) -> &Self::Target {
&self.0
}
}
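// Usage sketch (illustrative, not from the commit): `Deref` makes the newtype
// nearly transparent at call sites, which is why link.rs above can write
// `*allocated_sections[i]` to recover the raw `*const u8`.
//
//     let bytes = [0u8; 4];
//     let ptr = SectionBodyPtr(bytes.as_ptr());
//     let raw: *const u8 = *ptr; // deref to the underlying pointer
//     assert_eq!(raw as usize, bytes.as_ptr() as usize);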