diff --git a/src/allocator.rs b/src/allocator.rs
index 06863b9..2f58c42 100644
--- a/src/allocator.rs
+++ b/src/allocator.rs
@@ -6,7 +6,7 @@ use x86_64::{
     VirtAddr,
 };
 
-use crate::{info, println};
+use crate::info;
 
 pub const HEAP_START: usize = 0x_4444_4444_0000;
 pub const HEAP_SIZE: usize = 100 * 1024;
diff --git a/src/main.rs b/src/main.rs
index 5d3ab9d..64cd0e8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -7,7 +7,6 @@
 
 extern crate alloc;
 
-use alloc::boxed::Box;
 use bootloader::{entry_point, BootInfo};
 use core::arch::asm;
 use core::panic::PanicInfo;
@@ -20,11 +19,9 @@ use nel_os::{
         vmxon::Vmxon,
     },
 };
-use x86::bits64::{paging::BASE_PAGE_SIZE, rflags};
-use x86_64::structures::paging::FrameAllocator;
+use x86::bits64::rflags;
 use x86_64::{
     registers::{control::Cr0Flags, segmentation::Segment},
-    structures::paging::Translate,
     VirtAddr,
 };
 
@@ -54,16 +51,16 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
 
     allocator::init_heap(&mut mapper, &mut frame_allocator).expect("heap initialization failed");
 
-    let mut vmxon = unsafe { Box::<Vmxon>::new_zeroed().assume_init() };
-
     if has_intel_cpu() && has_vmx_support() {
         info!("Intel CPU with VMX support detected");
     } else {
         panic!("VMX not supported");
     }
 
-    vmxon.init();
-    vmxon.activate_vmxon(mapper).unwrap();
+    let mut vmxon = Vmxon::new(&mut frame_allocator);
+
+    vmxon.init(phys_mem_offset.as_u64());
+    vmxon.activate_vmxon().unwrap();
 
     info!("Checking vmlaunch requirements...");
     {
@@ -111,6 +108,8 @@ fn kernel_main(boot_info: &'static BootInfo) -> ! {
         asm!("vmlaunch");
     }
 
+    info!("vmlaunch succeeded");
+
     #[cfg(test)]
     test_main();
 
diff --git a/src/vmm/vmxon.rs b/src/vmm/vmxon.rs
index 29549c7..ec17688 100644
--- a/src/vmm/vmxon.rs
+++ b/src/vmm/vmxon.rs
@@ -1,23 +1,25 @@
-use core::arch::asm;
-use x86::bits64::rflags::{self, RFlags};
 use x86::bits64::vmx;
 use x86::controlregs::{cr4, Cr4};
-use x86::vmx::{Result, VmFail};
 use x86::{msr, msr::rdmsr};
 use x86_64::registers::control::Cr0;
-use x86_64::structures::paging::{OffsetPageTable, Translate};
-use x86_64::VirtAddr;
+use x86_64::structures::paging::{FrameAllocator, PhysFrame};
 
 use crate::info;
+use crate::memory::BootInfoFrameAllocator;
 
 #[repr(C, align(4096))]
 pub struct Vmxon {
-    pub revision_id: u32,
-    pub data: [u8; 4092],
+    frame: PhysFrame,
 }
 
 impl Vmxon {
-    pub fn check_vmxon_requirements(&mut self, mapper: OffsetPageTable) -> bool {
+    pub fn new(frame_allocator: &mut BootInfoFrameAllocator) -> Self {
+        let frame = frame_allocator.allocate_frame().unwrap();
+
+        Self { frame }
+    }
+
+    pub fn check_vmxon_requirements(&mut self) -> bool {
         info!("");
         info!("Checking VMXON requirements...");
         let cr4 = unsafe { x86::controlregs::cr4() };
@@ -46,7 +48,7 @@
             "IA32_VMX_CR0_FIXED0: {:#x}, IA32_VMX_CR0_FIXED1: {:#x}",
             ia32_vmx_cr0_fixed0, ia32_vmx_cr0_fixed1
         );
-        let cr0 = unsafe { Cr0::read_raw() };
+        let cr0 = Cr0::read_raw();
         info!("CR0: {:#x}", cr0);
         if (cr0 & ia32_vmx_cr0_fixed0) != ia32_vmx_cr0_fixed0 {
             info!("CR0 does not meet VMX requirements");
@@ -77,10 +79,7 @@
         info!("CR4 meets VMX requirements");
 
         // check self data(VMXON region) is aligned to 4K
-        let vmxon_region = mapper
-            .translate_addr(VirtAddr::from_ptr(&self))
-            .unwrap()
-            .as_u64();
+        let vmxon_region = self.frame.start_address().as_u64();
         info!("VMXON region: {:#x}", vmxon_region);
         if vmxon_region & 0xFFF != 0 {
             info!("VMXON region is not aligned to 4K");
@@ -91,15 +90,15 @@
         true
     }
 
-    pub fn zeroed() -> Self {
-        Vmxon {
-            revision_id: 0,
-            data: [0; 4092],
-        }
-    }
+    pub fn init(&mut self, phys_mem_offset: u64) {
+        let revision_id = unsafe { rdmsr(x86::msr::IA32_VMX_BASIC) } as u32;
+        let vmxon_region = self.frame.start_address().as_u64() + phys_mem_offset;
+        info!("VMXON region: {:#x}", vmxon_region);
+        info!("VMXON revision ID: {:#x}", revision_id);
 
-    pub fn init(&mut self) {
-        self.revision_id = unsafe { rdmsr(x86::msr::IA32_VMX_BASIC) } as u32;
+        unsafe {
+            core::ptr::write_volatile(vmxon_region as *mut u32, revision_id);
+        }
     }
 
     pub fn enable_vmx_operation() {
@@ -138,58 +137,18 @@
         unsafe { Cr0::write_raw(cr0) };
     }
 
-    /*pub fn set_cr4_bits() {
-        let ia32_vmx_cr4_fixed0 = unsafe { x86::msr::rdmsr(x86::msr::IA32_VMX_CR4_FIXED0) };
-        let ia32_vmx_cr4_fixed1 = unsafe { x86::msr::rdmsr(x86::msr::IA32_VMX_CR4_FIXED1) };
-
-        let mut cr4 = Cr4::read_raw();
-
-        cr4 |= ia32_vmx_cr4_fixed0;
-        cr4 &= ia32_vmx_cr4_fixed1;
-
-        unsafe {
-            Cr4::write_raw(cr4);
-        }
-    }*/
-
-    fn vmx_capture_status() -> Result<()> {
-        let flags = rflags::read();
-        info!("RFlags: {:?}", flags);
-
-        if flags.contains(RFlags::FLAGS_ZF) {
-            Err(VmFail::VmFailValid)
-        } else if flags.contains(RFlags::FLAGS_CF) {
-            Err(VmFail::VmFailInvalid)
-        } else {
-            Ok(())
-        }
-    }
-
-    pub unsafe fn vmxon(&mut self) {
-        //asm!("vmxon ({0})", in(reg) addr, options(att_syntax));
-        x86::bits64::vmx::vmxon(core::ptr::from_mut(self) as u64).unwrap()
-    }
-
-    pub fn activate_vmxon(&mut self, mapper: OffsetPageTable) -> core::result::Result<(), ()> {
+    pub fn activate_vmxon(&mut self) -> core::result::Result<(), ()> {
         info!("activating vmxon...");
         self.setup_vmxon()?;
 
-        info!("VMXON region at virtual address: {:p}", &self.data);
-        let phys_addr = mapper
-            .translate_addr(VirtAddr::from_ptr(&self))
-            .unwrap()
-            .as_u64();
-        info!("VMXON region at physical address: {:#x}", phys_addr);
-        info!("VMXON revision ID: {:#x}", self.revision_id);
-        if self.check_vmxon_requirements(mapper) {
+        if self.check_vmxon_requirements() {
             info!("VMXON requirements met");
         } else {
             panic!("VMXON requirements not met");
        }
 
         unsafe {
-            vmx::vmxon(phys_addr).unwrap();
-            //self.vmxon();
+            vmx::vmxon(self.frame.start_address().as_u64()).unwrap();
         };
 
         info!("vmxon success");
@@ -204,8 +163,7 @@
         info!("Feature control MSR adjusted");
 
         Vmxon::set_cr0_bits();
-        //Vmxon::set_cr4_bits();
-        info!("CR0 and CR4 bits set");
+        info!("CR0 bits set");
 
         Ok(())
     }
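
For reference, the VMXON-region flow introduced by this patch condenses to roughly the sketch below. It assumes the bootloader's complete physical-memory mapping at phys_mem_offset and a Size4KiB frame allocator, as set up in kernel_main; setup_vmxon_region is a hypothetical standalone helper written only to illustrate the flow, not a function added by the patch.

use x86::bits64::vmx;
use x86::msr::{rdmsr, IA32_VMX_BASIC};
use x86_64::structures::paging::{FrameAllocator, PhysFrame, Size4KiB};

// Allocate a 4 KiB frame, stamp it with the VMCS revision ID, and enter VMX operation.
fn setup_vmxon_region<A: FrameAllocator<Size4KiB>>(
    frame_allocator: &mut A,
    phys_mem_offset: u64,
) -> Result<PhysFrame, ()> {
    // A frame handed out by the allocator is 4 KiB aligned, which VMXON requires.
    let frame = frame_allocator.allocate_frame().ok_or(())?;
    let phys = frame.start_address().as_u64();

    // The frame is only reachable through the physical-memory offset mapping, so the
    // revision ID read from IA32_VMX_BASIC is written through that virtual address.
    let revision_id = unsafe { rdmsr(IA32_VMX_BASIC) } as u32;
    unsafe { core::ptr::write_volatile((phys + phys_mem_offset) as *mut u32, revision_id) };

    // VMXON itself takes the physical address of the region.
    unsafe { vmx::vmxon(phys).map_err(|_| ())? };

    Ok(frame)
}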