mirror of https://github.com/mii443/nel_os.git (synced 2025-08-22 16:15:38 +00:00)

Commit: aligned region cache
src/memory.rs (117 changed lines)
@@ -1,4 +1,4 @@
-use core::sync::atomic::AtomicU64;
+use core::sync::atomic::{AtomicU64, Ordering};
 use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 use x86_64::{
@@ -8,25 +8,52 @@ use x86_64::{
 pub static PHYSICAL_MEMORY_OFFSET: AtomicU64 = AtomicU64::new(0);
 
+const SIZE_2MIB: u64 = 0x20_0000;
+const ALIGN_2MIB_MASK: u64 = SIZE_2MIB - 1;
+
 pub struct BootInfoFrameAllocator {
     memory_map: &'static MemoryMap,
     next: usize,
+    aligned_regions: [(u64, u64); 32],
+    aligned_count: usize,
 }
 
 impl BootInfoFrameAllocator {
     pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
-        Self {
+        let mut allocator = Self {
             memory_map,
             next: 0,
-        }
+            aligned_regions: [(0, 0); 32],
+            aligned_count: 0,
+        };
+
+        allocator.cache_aligned_regions();
+
+        allocator
     }
 
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        let regions = self.memory_map.iter();
-        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
-        let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
-        let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
-        frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
+    fn cache_aligned_regions(&mut self) {
+        self.aligned_count = 0;
+
+        for region in self.memory_map.iter() {
+            if region.region_type != MemoryRegionType::Usable {
+                continue;
+            }
+
+            let start = region.range.start_addr();
+            let end = region.range.end_addr();
+
+            if end - start < SIZE_2MIB {
+                continue;
+            }
+
+            let aligned_start = (start + ALIGN_2MIB_MASK) & !ALIGN_2MIB_MASK;
+
+            if aligned_start + SIZE_2MIB <= end && self.aligned_count < self.aligned_regions.len() {
+                self.aligned_regions[self.aligned_count] = (aligned_start, end);
+                self.aligned_count += 1;
+            }
+        }
     }
 
     pub fn allocate_2mib_aligned(&mut self) -> Option<PhysAddr> {
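
Aside: the align-up expression cached above rounds an address up to the next 2 MiB boundary. A minimal standalone sketch of just that arithmetic (the sample addresses are made up for illustration):

const SIZE_2MIB: u64 = 0x20_0000;
const ALIGN_2MIB_MASK: u64 = SIZE_2MIB - 1;

/// Round `addr` up to the next 2 MiB boundary; aligned inputs pass through unchanged.
fn align_up_2mib(addr: u64) -> u64 {
    (addr + ALIGN_2MIB_MASK) & !ALIGN_2MIB_MASK
}

fn main() {
    assert_eq!(align_up_2mib(0x1000), 0x20_0000);    // 4 KiB rounds up to 2 MiB
    assert_eq!(align_up_2mib(0x20_0000), 0x20_0000); // already aligned
    assert_eq!(align_up_2mib(0x20_0001), 0x40_0000); // just past a boundary
}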
@@ -35,36 +62,54 @@ impl BootInfoFrameAllocator {
     }
 
     pub fn allocate_2mib_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
-        let mut frames = self.usable_frames().skip(self.next);
-
-        let base_frame = frames.find(|frame| frame.start_address().as_u64() & 0x1F_FFFF == 0)?;
-
-        let base_idx = self.next
-            + frames
-                .enumerate()
-                .find(|(_, frame)| frame.start_address() == base_frame.start_address())
-                .map(|(idx, _)| idx)
-                .unwrap_or(0);
-
-        let frame_count = 512;
-        let frames_are_available = self
-            .usable_frames()
-            .skip(base_idx)
-            .take(frame_count)
-            .enumerate()
-            .all(|(idx, frame)| {
-                let expected_addr = base_frame.start_address().as_u64() + (idx as u64 * 4096);
-                frame.start_address().as_u64() == expected_addr
-            });
+        for i in 0..self.aligned_count {
+            let (start, end) = self.aligned_regions[i];
+
+            if start + SIZE_2MIB <= end {
+                let frame = PhysFrame::containing_address(PhysAddr::new(start));
+                self.aligned_regions[i].0 = start + SIZE_2MIB;
+
+                return Some(frame);
+            }
+        }
 
-        if !frames_are_available {
-            self.next = base_idx + 1;
+        if self.aligned_count == 0 {
+            self.cache_aligned_regions();
             return self.allocate_2mib_frame();
         }
 
-        self.next = base_idx + frame_count;
-
-        Some(base_frame)
+        let mut frames = self.usable_frames().skip(self.next);
+
+        let base_frame =
+            frames.find(|frame| frame.start_address().as_u64() & ALIGN_2MIB_MASK == 0)?;
+
+        let base_addr = base_frame.start_address().as_u64();
+
+        let is_continuous = self
+            .memory_map
+            .iter()
+            .filter(|r| r.region_type == MemoryRegionType::Usable)
+            .any(|r| {
+                let start = r.range.start_addr();
+                let end = r.range.end_addr();
+                base_addr >= start && base_addr + SIZE_2MIB <= end
+            });
+
+        if is_continuous {
+            self.next += 512;
+            return Some(base_frame);
+        }
+
+        None
+    }
+
+    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
+        let regions = self.memory_map.iter();
+        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
+        let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
+        let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
+        frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
     }
 }
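
Aside: the fast path added above treats each cached (start, end) pair as a bump allocator that hands out 2 MiB chunks until the pair is exhausted. A self-contained model of that behavior, with a plain array standing in for the MemoryMap-backed allocator (the names here are illustrative, not from the commit):

const SIZE_2MIB: u64 = 0x20_0000;

struct AlignedCache {
    regions: [(u64, u64); 32],
    count: usize,
}

impl AlignedCache {
    /// Mirror of the cached fast path: bump a region's start by 2 MiB per allocation.
    fn allocate(&mut self) -> Option<u64> {
        for i in 0..self.count {
            let (start, end) = self.regions[i];
            if start + SIZE_2MIB <= end {
                self.regions[i].0 = start + SIZE_2MIB;
                return Some(start);
            }
        }
        None // all cached regions drained; the real code falls back to a linear scan
    }
}

fn main() {
    let mut cache = AlignedCache { regions: [(0, 0); 32], count: 1 };
    cache.regions[0] = (0x20_0000, 0x60_0000); // one 4 MiB usable window
    assert_eq!(cache.allocate(), Some(0x20_0000));
    assert_eq!(cache.allocate(), Some(0x40_0000));
    assert_eq!(cache.allocate(), None); // exhausted
}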
@@ -85,12 +130,12 @@ unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
     let virt = physical_memory_offset + phys.as_u64();
     let page_table_ptr: *mut PageTable = virt.as_mut_ptr();
 
-    unsafe { &mut *page_table_ptr }
+    &mut *page_table_ptr
 }
 
 pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
-    unsafe {
-        let level_4_table = active_level_4_table(physical_memory_offset);
-        OffsetPageTable::new(level_4_table, physical_memory_offset)
-    }
+    PHYSICAL_MEMORY_OFFSET.store(physical_memory_offset.as_u64(), Ordering::SeqCst);
+
+    let level_4_table = active_level_4_table(physical_memory_offset);
+    OffsetPageTable::new(level_4_table, physical_memory_offset)
 }
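
Aside: publishing the offset through PHYSICAL_MEMORY_OFFSET lets code that holds no reference to the OffsetPageTable translate physical addresses. A sketch of such a helper, assuming init has already run (phys_to_virt is hypothetical; only the static itself appears in this commit):

use core::sync::atomic::Ordering;
use x86_64::{PhysAddr, VirtAddr};

/// Hypothetical helper: map a physical address through the offset stored by
/// `memory::init`. Only meaningful after `init` has published the offset.
pub fn phys_to_virt(phys: PhysAddr) -> VirtAddr {
    let offset = crate::memory::PHYSICAL_MEMORY_OFFSET.load(Ordering::SeqCst);
    VirtAddr::new(phys.as_u64() + offset)
}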
(second file in this commit; its path is not preserved on this page)

@@ -122,7 +122,7 @@ impl VCpu {
 
         primary_exec_ctrl.0 |= (reserved_bits & 0xFFFFFFFF) as u32;
         primary_exec_ctrl.0 &= (reserved_bits >> 32) as u32;
-        primary_exec_ctrl.set_hlt(true);
+        primary_exec_ctrl.set_hlt(false);
         primary_exec_ctrl.set_activate_secondary_controls(true);
 
         primary_exec_ctrl.write();
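
Aside: the two reserved_bits lines follow the usual VMX control-adjustment rule: the low 32 bits of the capability MSR are the allowed-0 settings (bits that must be 1) and the high 32 bits are the allowed-1 settings (bits that may be 1). With set_hlt(false), the HLT-exiting control is left clear, so a guest HLT no longer forces a VM exit. A sketch of the adjustment rule in isolation (the function name is made up; the rule itself comes from the Intel SDM):

/// Combine a requested VMX control value with its capability MSR
/// (e.g. IA32_VMX_PROCBASED_CTLS): OR in the must-be-one half,
/// then AND with the may-be-one half.
fn adjust_vmx_control(requested: u32, capability_msr: u64) -> u32 {
    let must_be_one = (capability_msr & 0xFFFF_FFFF) as u32;
    let may_be_one = (capability_msr >> 32) as u32;
    (requested | must_be_one) & may_be_one
}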