aligned region cache
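
Cache 2 MiB-aligned usable regions from the bootloader memory map when the
frame allocator is initialized, and serve allocate_2mib_frame() from that
cache before falling back to the linear scan over usable 4 KiB frames. Also
store the physical memory offset in PHYSICAL_MEMORY_OFFSET during paging
init, and clear the HLT bit in the VMX primary execution controls so a guest
HLT no longer causes a VM exit.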

Masato Imai committed 2025-05-02 06:07:25 +00:00
parent c21da557e7
commit ce32d4b150
2 changed files with 82 additions and 37 deletions

Changed file 1 (frame allocator and paging; path not shown)

@@ -1,4 +1,4 @@
-use core::sync::atomic::AtomicU64;
+use core::sync::atomic::{AtomicU64, Ordering};
 use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 use x86_64::{
@@ -8,25 +8,52 @@ use x86_64::{
 pub static PHYSICAL_MEMORY_OFFSET: AtomicU64 = AtomicU64::new(0);
+const SIZE_2MIB: u64 = 0x20_0000;
+const ALIGN_2MIB_MASK: u64 = SIZE_2MIB - 1;
 pub struct BootInfoFrameAllocator {
     memory_map: &'static MemoryMap,
     next: usize,
+    aligned_regions: [(u64, u64); 32],
+    aligned_count: usize,
 }
 impl BootInfoFrameAllocator {
     pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
-        Self {
+        let mut allocator = Self {
             memory_map,
             next: 0,
-        }
+            aligned_regions: [(0, 0); 32],
+            aligned_count: 0,
+        };
+        allocator.cache_aligned_regions();
+        allocator
     }
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        let regions = self.memory_map.iter();
-        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
-        let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
-        let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
-        frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
+    fn cache_aligned_regions(&mut self) {
+        self.aligned_count = 0;
+        for region in self.memory_map.iter() {
+            if region.region_type != MemoryRegionType::Usable {
+                continue;
+            }
+            let start = region.range.start_addr();
+            let end = region.range.end_addr();
+            if end - start < SIZE_2MIB {
+                continue;
+            }
+            let aligned_start = (start + ALIGN_2MIB_MASK) & !ALIGN_2MIB_MASK;
+            if aligned_start + SIZE_2MIB <= end && self.aligned_count < self.aligned_regions.len() {
+                self.aligned_regions[self.aligned_count] = (aligned_start, end);
+                self.aligned_count += 1;
+            }
+        }
     }
     pub fn allocate_2mib_aligned(&mut self) -> Option<PhysAddr> {
@@ -35,36 +62,54 @@ impl BootInfoFrameAllocator {
     }
     pub fn allocate_2mib_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
-        let mut frames = self.usable_frames().skip(self.next);
-        let base_frame = frames.find(|frame| frame.start_address().as_u64() & 0x1F_FFFF == 0)?;
-        let base_idx = self.next
-            + frames
-                .enumerate()
-                .find(|(_, frame)| frame.start_address() == base_frame.start_address())
-                .map(|(idx, _)| idx)
-                .unwrap_or(0);
-        let frame_count = 512;
-        let frames_are_available = self
-            .usable_frames()
-            .skip(base_idx)
-            .take(frame_count)
-            .enumerate()
-            .all(|(idx, frame)| {
-                let expected_addr = base_frame.start_address().as_u64() + (idx as u64 * 4096);
-                frame.start_address().as_u64() == expected_addr
-            });
-        if !frames_are_available {
-            self.next = base_idx + 1;
+        for i in 0..self.aligned_count {
+            let (start, end) = self.aligned_regions[i];
+            if start + SIZE_2MIB <= end {
+                let frame = PhysFrame::containing_address(PhysAddr::new(start));
+                self.aligned_regions[i].0 = start + SIZE_2MIB;
+                return Some(frame);
+            }
+        }
+        if self.aligned_count == 0 {
+            self.cache_aligned_regions();
             return self.allocate_2mib_frame();
         }
-        self.next = base_idx + frame_count;
-        Some(base_frame)
+        let mut frames = self.usable_frames().skip(self.next);
+        let base_frame =
+            frames.find(|frame| frame.start_address().as_u64() & ALIGN_2MIB_MASK == 0)?;
+        let base_addr = base_frame.start_address().as_u64();
+        let is_continuous = self
+            .memory_map
+            .iter()
+            .filter(|r| r.region_type == MemoryRegionType::Usable)
+            .any(|r| {
+                let start = r.range.start_addr();
+                let end = r.range.end_addr();
+                base_addr >= start && base_addr + SIZE_2MIB <= end
+            });
+        if is_continuous {
+            self.next += 512;
+            return Some(base_frame);
+        }
+        None
     }
+    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
+        let regions = self.memory_map.iter();
+        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
+        let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
+        let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
+        frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
+    }
 }
@@ -85,12 +130,12 @@ unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut
     let virt = physical_memory_offset + phys.as_u64();
     let page_table_ptr: *mut PageTable = virt.as_mut_ptr();
-    unsafe { &mut *page_table_ptr }
+    &mut *page_table_ptr
 }
 pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
-    unsafe {
-        let level_4_table = active_level_4_table(physical_memory_offset);
-        OffsetPageTable::new(level_4_table, physical_memory_offset)
-    }
+    PHYSICAL_MEMORY_OFFSET.store(physical_memory_offset.as_u64(), Ordering::SeqCst);
+    let level_4_table = active_level_4_table(physical_memory_offset);
+    OffsetPageTable::new(level_4_table, physical_memory_offset)
 }
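
For context, a minimal sketch of how the updated allocator might be driven from a kernel entry point. It assumes the bootloader crate's entry_point! API and that the diff above lives in a module named memory; the module and function names are illustrative, not part of this commit.

#![no_std]
#![no_main]

use bootloader::{entry_point, BootInfo};

entry_point!(kernel_main);

fn kernel_main(boot_info: &'static BootInfo) -> ! {
    // Safety: the bootloader-provided memory map is valid and lives for
    // the whole runtime; init() pre-caches 2 MiB-aligned usable regions.
    let mut allocator =
        unsafe { memory::BootInfoFrameAllocator::init(&boot_info.memory_map) };

    // Fast path: pop a 2 MiB block from a cached aligned region. Once the
    // cache is drained, allocate_2mib_frame() falls back to the linear
    // scan for an aligned, contiguous run of 512 usable 4 KiB frames.
    if let Some(frame) = allocator.allocate_2mib_frame() {
        let _base = frame.start_address(); // 2 MiB-aligned physical base
    }

    loop {}
}

#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
    loop {}
}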

Changed file 2 (VCpu VMX execution controls; path not shown)

@@ -122,7 +122,7 @@ impl VCpu {
         primary_exec_ctrl.0 |= (reserved_bits & 0xFFFFFFFF) as u32;
         primary_exec_ctrl.0 &= (reserved_bits >> 32) as u32;
-        primary_exec_ctrl.set_hlt(true);
+        primary_exec_ctrl.set_hlt(false);
         primary_exec_ctrl.set_activate_secondary_controls(true);
         primary_exec_ctrl.write();
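
The two lines above set_hlt() are the standard VMX control-adjustment pattern: the capability MSR's low 32 bits give the controls that must be 1 (allowed-0 settings), its high 32 bits the controls that may be 1 (allowed-1 settings). A sketch of where reserved_bits plausibly comes from, using the x86_64 crate's Msr API; the exact MSR is an assumption, since the commit does not show where it is read.

use x86_64::registers::model_specific::Msr;

// IA32_VMX_PROCBASED_CTLS (MSR 0x482). Assumption: the commit's
// `reserved_bits` is read from this MSR (or from the TRUE variant,
// IA32_VMX_TRUE_PROCBASED_CTLS at 0x48E, when it is supported).
const IA32_VMX_PROCBASED_CTLS: u32 = 0x482;

/// Clamp a desired primary processor-based execution-control value to
/// what the CPU actually allows.
fn adjust_primary_exec_ctrl(desired: u32) -> u32 {
    // Safety: reading a VMX capability MSR requires CPL 0 on a CPU with
    // VMX support, which a VMM running this code already has.
    let caps = unsafe { Msr::new(IA32_VMX_PROCBASED_CTLS).read() };
    let mut value = desired;
    value |= (caps & 0xFFFF_FFFF) as u32; // set bits that must be 1
    value &= (caps >> 32) as u32; // clear bits that must be 0
    value
}

With set_hlt(false), a guest HLT no longer forces a VM exit, so the guest can idle in place instead of bouncing into the VMM on every halt.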