commit 7168cd0826 (parent ce32d4b150)
Author: Masato Imai
Date:   2025-05-02 08:41:57 +00:00

3 changed files with 377 additions and 18 deletions

View File

@ -132,6 +132,35 @@ impl EPT {
Some(lv2_entry.address().as_u64())
}
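/// Writes a single byte to guest memory at `gpa`, resolving the host-physical
/// address through the EPT and the host's physical-memory offset mapping.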
pub fn set(&mut self, gpa: u64, value: u8) -> Result<(), &'static str> {
let hpa = self
.get_phys_addr(gpa)
.ok_or("Failed to get physical address")?;
let phys_addr_offset = memory::PHYSICAL_MEMORY_OFFSET.load(Ordering::Relaxed);
let hpa = hpa + phys_addr_offset;
let guest_memory = unsafe { &mut *(hpa as *mut [u8; 0x100000]) };
let offset = (gpa & 0xFFFFF) as usize;
guest_memory[offset] = value;
Ok(())
}
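/// Fills the inclusive guest-physical range [gpa_start, gpa_end] with `value`,
/// one byte at a time.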
pub fn set_range(
&mut self,
gpa_start: u64,
gpa_end: u64,
value: u8,
) -> Result<(), &'static str> {
let mut gpa = gpa_start;
while gpa <= gpa_end {
self.set(gpa, value)?;
gpa += 1;
}
Ok(())
}
}
bitfield! {

View File

@ -1 +1,258 @@
use core::ptr::read_unaligned;
pub const BZIMAGE: &'static [u8] = include_bytes!("../../bzImage");
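// Guest-physical layout used when loading the bzImage: boot_params (zero page),
// kernel command line, protected-mode kernel, and initrd.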
pub const LAYOUT_BOOTPARAM: u64 = 0x0001_0000;
pub const LAYOUT_CMDLINE: u64 = 0x0002_0000;
pub const LAYOUT_KERNEL_BASE: u64 = 0x0010_0000;
pub const LAYOUT_INITRD: u64 = 0x0600_0000;
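/// Linux x86 `boot_params` ("zero page") structure handed to the kernel at entry;
/// underscore-prefixed fields are kept only for layout.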
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
pub struct BootParams {
pub _screen_info: [u8; 0x40],
pub _apm_bios_info: [u8; 0x14],
pub _pad2: [u8; 4],
pub tboot_addr: u64,
pub ist_info: [u8; 0x10],
pub _pad3: [u8; 0x10],
pub hd0_info: [u8; 0x10],
pub hd1_info: [u8; 0x10],
pub _sys_desc_table: [u8; 0x10],
pub _olpc_ofw_header: [u8; 0x10],
pub _pad4: [u8; 0x80],
pub _edid_info: [u8; 0x80],
pub _efi_info: [u8; 0x20],
pub alt_mem_k: u32,
pub scratch: u32,
pub e820_entries: u8,
pub eddbuf_entries: u8,
pub edd_mbr_sig_buf_entries: u8,
pub kbd_status: u8,
pub _pad6: [u8; 5],
pub hdr: SetupHeader,
pub _pad7: [u8; 0x290 - SetupHeader::HEADER_OFFSET - size_of::<SetupHeader>()],
pub _edd_mbr_sig_buffer: [u32; 0x10],
pub e820_map: [E820Entry; Self::E820MAX],
pub _unimplemented: [u8; 0x330],
}
impl BootParams {
pub const E820MAX: usize = 128;
pub fn from_bytes(bytes: &[u8]) -> Result<Self, &'static str> {
if bytes.len() < size_of::<Self>() {
return Err("バイト配列が小さすぎます");
}
unsafe {
let boot_params_ptr = bytes.as_ptr() as *const Self;
Ok(read_unaligned(boot_params_ptr))
}
}
pub fn add_e820_entry(&mut self, addr: u64, size: u64, type_: E820Type) {
self.e820_map[self.e820_entries as usize].addr = addr;
self.e820_map[self.e820_entries as usize].size = size;
self.e820_map[self.e820_entries as usize].type_ = type_ as u32;
self.e820_entries += 1;
}
}
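/// Linux boot protocol setup header, located at offset 0x1F1 into the
/// bzImage / boot_params.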
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
pub struct SetupHeader {
pub setup_sects: u8,
pub root_flags: u16,
pub syssize: u32,
pub ram_size: u16,
pub vid_mode: u16,
pub root_dev: u16,
pub boot_flag: u16,
pub jump: u16,
pub header: u32,
pub version: u16,
pub realmode_switch: u32,
pub start_sys_seg: u16,
pub kernel_version: u16,
pub type_of_loader: u8,
pub loadflags: LoadflagBitfield,
pub setup_move_size: u16,
pub code32_start: u32,
pub ramdisk_image: u32,
pub ramdisk_size: u32,
pub bootsect_kludge: u32,
pub heap_end_ptr: u16,
pub ext_loader_ver: u8,
pub ext_loader_type: u8,
pub cmd_line_ptr: u32,
pub initrd_addr_max: u32,
pub kernel_alignment: u32,
pub relocatable_kernel: u8,
pub min_alignment: u8,
pub xloadflags: u16,
pub cmdline_size: u32,
pub hardware_subarch: u32,
pub hardware_subarch_data: u64,
pub payload_offset: u32,
pub payload_length: u32,
pub setup_data: u64,
pub pref_address: u64,
pub init_size: u32,
pub handover_offset: u32,
pub kernel_info_offset: u32,
}
impl SetupHeader {
pub const HEADER_OFFSET: usize = 0x1F1;
pub fn from_bytes(bytes: &[u8]) -> Result<Self, &'static str> {
if bytes.len() < Self::HEADER_OFFSET + size_of::<Self>() {
return Err("バイト配列が小さすぎます");
}
let mut header = unsafe {
let header_ptr = bytes.as_ptr().add(Self::HEADER_OFFSET) as *const Self;
read_unaligned(header_ptr)
};
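// Per the boot protocol, a setup_sects value of 0 means the real value is 4.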
if header.setup_sects == 0 {
header.setup_sects = 4;
}
Ok(header)
}
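/// Offset of the protected-mode kernel within the bzImage: the boot sector
/// plus `setup_sects` real-mode sectors of 512 bytes each.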
pub fn get_protected_code_offset(&self) -> usize {
(self.setup_sects as usize + 1) * 512
}
}
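/// Accessors for the `loadflags` byte of the setup header
/// (LOADED_HIGH, KASLR_FLAG, QUIET_FLAG, KEEP_SEGMENTS, CAN_USE_HEAP).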
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
pub struct LoadflagBitfield {
raw: u8,
}
impl LoadflagBitfield {
pub fn loaded_high(&self) -> bool {
(self.raw & 0x01) != 0
}
pub fn set_loaded_high(&mut self, loaded_high: bool) {
if loaded_high {
self.raw |= 0x01;
} else {
self.raw &= !0x01;
}
}
pub fn kaslr_flag(&self) -> bool {
(self.raw & 0x02) != 0
}
pub fn quiet_flag(&self) -> bool {
(self.raw & 0x20) != 0
}
pub fn keep_segments(&self) -> bool {
(self.raw & 0x40) != 0
}
pub fn set_keep_segments(&mut self, keep_segments: bool) {
if keep_segments {
self.raw |= 0x40;
} else {
self.raw &= !0x40;
}
}
pub fn can_use_heap(&self) -> bool {
(self.raw & 0x80) != 0
}
pub fn set_can_use_heap(&mut self, can_use_heap: bool) {
if can_use_heap {
self.raw |= 0x80;
} else {
self.raw &= !0x80;
}
}
pub fn new(
loaded_high: bool,
kaslr_flag: bool,
quiet_flag: bool,
keep_segments: bool,
can_use_heap: bool,
) -> Self {
let mut raw = 0u8;
if loaded_high {
raw |= 0x01;
}
if kaslr_flag {
raw |= 0x02;
}
if quiet_flag {
raw |= 0x20;
}
if keep_segments {
raw |= 0x40;
}
if can_use_heap {
raw |= 0x80;
}
Self { raw }
}
pub fn to_u8(&self) -> u8 {
self.raw
}
}
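/// One entry of the e820 memory map passed to the guest kernel via boot_params.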
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
pub struct E820Entry {
addr: u64,
size: u64,
type_: u32,
}
impl E820Entry {
pub fn get_addr(&self) -> u64 {
self.addr
}
pub fn get_size(&self) -> u64 {
self.size
}
pub fn get_type(&self) -> Result<E820Type, &'static str> {
match self.type_ {
1 => Ok(E820Type::Ram),
2 => Ok(E820Type::Reserved),
3 => Ok(E820Type::Acpi),
4 => Ok(E820Type::Nvs),
5 => Ok(E820Type::Unusable),
_ => Err("不明なE820タイプ"),
}
}
pub fn new(addr: u64, size: u64, type_: E820Type) -> Self {
Self {
addr,
size,
type_: type_ as u32,
}
}
}
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum E820Type {
Ram = 1,
Reserved = 2,
Acpi = 3,
Nvs = 4,
Unusable = 5,
}

View File

@ -1,5 +1,5 @@
use x86::{
bits64::vmx::vmwrite,
bits64::vmx::{vmread, vmwrite},
controlregs::{cr0, cr3, cr4, Cr0},
dtables::{self, DescriptorTablePointer},
msr::{rdmsr, IA32_EFER, IA32_FS_BASE},
@ -12,6 +12,7 @@ use core::sync::atomic::Ordering;
use crate::{
info,
memory::{self, BootInfoFrameAllocator},
serial_print,
vmm::vmcs::{
DescriptorType, EntryControls, Granularity, PrimaryExitControls,
PrimaryProcessorBasedVmExecutionControls, SecondaryProcessorBasedVmExecutionControls,
@ -21,6 +22,7 @@ use crate::{
use super::{
ept::{EPT, EPTP},
linux::{self, BootParams, E820Type},
register::GuestRegisters,
vmcs::{InstructionError, PinBasedVmExecutionControls, Vmcs},
vmxon::Vmxon,
@ -73,8 +75,70 @@ impl VCpu {
self.setup_guest_memory(frame_allocator);
}
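/// Copies a bzImage into guest memory: patches its boot_params, writes the
/// kernel command line, and places the protected-mode kernel at LAYOUT_KERNEL_BASE.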
pub fn load_kernel(&mut self, kernel: &[u8]) {
info!("Loading kernel into guest memory");
let guest_mem_size = 100 * 1024 * 1024;
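// Parse the boot_params embedded in the bzImage and patch the setup header per
// the boot protocol (type_of_loader 0xFF = undefined loader, loaded high,
// heap end and command-line pointers).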
let mut bp = BootParams::from_bytes(kernel).unwrap();
bp.e820_entries = 0;
bp.hdr.type_of_loader = 0xFF;
bp.hdr.ext_loader_ver = 0;
bp.hdr.loadflags.set_loaded_high(true);
bp.hdr.loadflags.set_can_use_heap(true);
bp.hdr.heap_end_ptr = (linux::LAYOUT_BOOTPARAM - 0x200) as u16;
bp.hdr.loadflags.set_keep_segments(true);
bp.hdr.cmd_line_ptr = linux::LAYOUT_CMDLINE as u32;
bp.hdr.vid_mode = 0xFFFF;
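// Report guest RAM to the kernel: the region below the kernel base and the
// remainder of guest memory above it.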
bp.add_e820_entry(0, linux::LAYOUT_KERNEL_BASE, E820Type::Ram);
bp.add_e820_entry(
linux::LAYOUT_KERNEL_BASE,
guest_mem_size - linux::LAYOUT_KERNEL_BASE,
E820Type::Ram,
);
let cmdline_max_size = if bp.hdr.cmdline_size < 256 {
bp.hdr.cmdline_size
} else {
256
};
let cmdline_start = linux::LAYOUT_CMDLINE as u64;
let cmdline_end = cmdline_start + cmdline_max_size as u64;
self.ept.set_range(cmdline_start, cmdline_end, 0).unwrap();
let cmdline_val = "console=ttyS0 earlyprintk=serial nokaslr";
let cmdline_bytes = cmdline_val.as_bytes();
for (i, &byte) in cmdline_bytes.iter().enumerate() {
self.ept.set(cmdline_start + i as u64, byte).unwrap();
}
let bp_bytes = unsafe {
core::slice::from_raw_parts(
&bp as *const BootParams as *const u8,
core::mem::size_of::<BootParams>(),
)
};
self.load_image(bp_bytes, linux::LAYOUT_BOOTPARAM as usize);
let code_offset = bp.hdr.get_protected_code_offset();
let code_size = kernel.len() - code_offset;
self.load_image(
&kernel[code_offset..code_offset + code_size],
linux::LAYOUT_KERNEL_BASE as usize,
);
info!("Kernel loaded into guest memory");
}
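/// Writes `image` byte-by-byte into guest memory starting at guest-physical
/// address `addr`.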
pub fn load_image(&mut self, image: &[u8], addr: usize) {
for (i, &byte) in image.iter().enumerate() {
let gpa = addr + i;
self.ept.set(gpa as u64, byte).unwrap();
}
}
pub fn setup_guest_memory(&mut self, frame_allocator: &mut BootInfoFrameAllocator) {
let mut pages = 25;
let mut pages = 100;
let mut gpa = 0;
info!("Setting up guest memory...");
@ -90,6 +154,18 @@ impl VCpu {
}
info!("Guest memory setup complete");
self.load_kernel(linux::BZIMAGE);
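// Read back the boot_params just written into guest memory and dump it for debugging.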
unsafe {
let phys_mem_offset = memory::PHYSICAL_MEMORY_OFFSET.load(Ordering::Relaxed);
let hpa = self
.ept
.get_phys_addr(linux::LAYOUT_BOOTPARAM as u64)
.unwrap()
+ phys_mem_offset;
let bp = &*(hpa as *const BootParams);
info!("{:?}", *bp);
}
let eptp = EPTP::new(&self.ept.root_table);
unsafe { vmwrite(vmcs::control::EPTP_FULL, eptp.0).unwrap() };
}
@ -122,7 +198,7 @@ impl VCpu {
primary_exec_ctrl.0 |= (reserved_bits & 0xFFFFFFFF) as u32;
primary_exec_ctrl.0 &= (reserved_bits >> 32) as u32;
primary_exec_ctrl.set_hlt(false);
primary_exec_ctrl.set_hlt(true);
primary_exec_ctrl.set_activate_secondary_controls(true);
primary_exec_ctrl.write();
@ -280,10 +356,7 @@ impl VCpu {
vmwrite(vmcs::guest::IDTR_LIMIT, 0)?;
vmwrite(vmcs::guest::LDTR_LIMIT, 0)?;
vmwrite(
vmcs::guest::CS_SELECTOR,
x86::segmentation::cs().bits() as u64,
)?;
vmwrite(vmcs::guest::CS_SELECTOR, 0)?;
vmwrite(vmcs::guest::SS_SELECTOR, 0)?;
vmwrite(vmcs::guest::DS_SELECTOR, 0)?;
vmwrite(vmcs::guest::ES_SELECTOR, 0)?;
@ -300,8 +373,8 @@ impl VCpu {
rights.set_desc_type_raw(DescriptorType::Code as u8);
rights.set_dpl(0);
rights.set_granularity_raw(Granularity::KByte as u8);
rights.set_long(true);
rights.set_db(false);
rights.set_long(false);
rights.set_db(true);
rights
};
@ -358,9 +431,13 @@ impl VCpu {
vmwrite(vmcs::guest::TR_ACCESS_RIGHTS, tr_right.0 as u64)?;
vmwrite(vmcs::guest::LDTR_ACCESS_RIGHTS, ldtr_right.0 as u64)?;
vmwrite(vmcs::guest::IA32_EFER_FULL, rdmsr(IA32_EFER))?;
vmwrite(vmcs::guest::IA32_EFER_FULL, 0)?;
vmwrite(vmcs::guest::RFLAGS, 0x2)?;
vmwrite(vmcs::guest::LINK_PTR_FULL, u64::MAX)?;
vmwrite(vmcs::guest::RIP, linux::LAYOUT_KERNEL_BASE as u64)?;
self.guest_registers.rsi = linux::LAYOUT_BOOTPARAM as u64;
info!("Guest RIP: {:#x}", linux::LAYOUT_KERNEL_BASE as u64);
}
Ok(())
@ -374,14 +451,6 @@ impl VCpu {
pub fn vm_loop(&mut self) -> ! {
info!("Entering VM loop");
let guest_ptr = crate::vmm::asm::guest_entry as u64;
let guest_addr = self.ept.get_phys_addr(0).unwrap()
+ memory::PHYSICAL_MEMORY_OFFSET.load(Ordering::Relaxed);
unsafe {
core::ptr::copy_nonoverlapping(guest_ptr as *const u8, guest_addr as *mut u8, 200);
vmwrite(vmcs::guest::RIP, 0).unwrap();
}
loop {
if let Err(err) = self.vmentry() {
info!("VMEntry failed: {}", err.as_str());
@ -444,6 +513,10 @@ impl VCpu {
_ => {}
}
} else {
info!(
"vcpu RIP: {:#x}",
unsafe { vmread(vmcs::guest::RIP) }.unwrap()
);
match info.get_reason() {
VmxExitReason::HLT => {
info!("HLT instruction executed");