Merge pull request #1 from mii443/amd

AMD emulation: present an AuthenticAMD CPUID vendor to the guest, answer AMD-specific MSR reads, and emulate instructions (SYSCALL, CLAC, STAC) that raise #UD on Intel VT-x hosts.
mii443 committed 2025-06-28 19:04:47 +09:00 (committed via GitHub)
6 changed files with 236 additions and 56 deletions

src/vmm/cpuid.rs

@@ -6,7 +6,12 @@ use super::{vcpu::VCpu, vmcs::VmxLeaf};
 pub fn handle_cpuid_exit(vcpu: &mut VCpu) {
     let regs = &mut vcpu.guest_registers;
-    let vendor: &[u8; 12] = b"miHypervisor";
+    let vendor: &[u8; 12] = if vcpu.emulate_amd {
+        b"AuthenticAMD"
+    } else {
+        b"miHypervisor"
+    };
     let brand_string: &[u8; 48] = b"mii Hypervisor CPU on Intel VT-x \0";
     let vendor = unsafe { core::mem::transmute::<&[u8; 12], &[u32; 3]>(vendor) };
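For context, a guest-side sketch (not part of this diff) of how a kernel would observe the spoofed vendor: CPUID leaf 0 packs the 12-byte string into EBX, EDX, ECX, in that order.

    use core::arch::x86_64::__cpuid;

    /// Read the CPUID vendor string (EBX:EDX:ECX order).
    fn cpuid_vendor() -> [u8; 12] {
        let r = unsafe { __cpuid(0) };
        let mut v = [0u8; 12];
        v[0..4].copy_from_slice(&r.ebx.to_le_bytes());
        v[4..8].copy_from_slice(&r.edx.to_le_bytes());
        v[8..12].copy_from_slice(&r.ecx.to_le_bytes());
        v // b"AuthenticAMD" when vcpu.emulate_amd is set
    }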

src/vmm/emulation/mod.rs (new file, 1 line)

@@ -0,0 +1 @@
pub mod opcode;

src/vmm/emulation/opcode.rs (new file, 174 lines)

@@ -0,0 +1,174 @@
use crate::{
    info,
    vmm::{
        vcpu::VCpu,
        vmcs::{DescriptorType, EntryControls, Granularity, SegmentRights},
    },
};
use x86::current::vmx::{vmread, vmwrite};
use x86::vmx::vmcs;

// Opcode bytes this emulator recognizes.
const OPCODE_TWO_BYTE_ESCAPE: u8 = 0x0F;
const OPCODE_GROUP_7: u8 = 0x01; // 0F 01 group (CLAC/STAC live here)
const OPCODE_CLAC: u8 = 0xCA; // 0F 01 CA
const OPCODE_STAC: u8 = 0xCB; // 0F 01 CB
const OPCODE_SYSCALL: u8 = 0x05; // 0F 05

const RFLAGS_AC_BIT: u64 = 1 << 18; // Alignment Check / SMAP access flag
pub struct OpcodeEmulator {
    pub original_opcode: Option<[u8; 16]>,
    pub replaced_address: Option<u64>,
}

impl OpcodeEmulator {
    pub fn new() -> Self {
        OpcodeEmulator {
            original_opcode: None,
            replaced_address: None,
        }
    }
}
/// Try to emulate an instruction that raised #UD in the guest. Returns
/// true if it was handled (guest state already updated), false if the
/// original exception should be injected instead.
pub fn emulate_opcode(vcpu: &mut VCpu, instruction_bytes: [u8; 16], valid_bytes: u64) -> bool {
    // Everything we emulate starts with the two-byte escape 0F.
    if instruction_bytes[0] != OPCODE_TWO_BYTE_ESCAPE || valid_bytes < 2 {
        return false;
    }

    // 0F 05: SYSCALL.
    if instruction_bytes[1] == OPCODE_SYSCALL {
        return emulate_syscall(vcpu);
    }

    // Three-byte forms: don't inspect byte 2 unless it was actually fetched.
    if valid_bytes < 3 {
        return false;
    }

    // 0F 01 CA: CLAC / 0F 01 CB: STAC.
    match (instruction_bytes[1], instruction_bytes[2]) {
        (OPCODE_GROUP_7, OPCODE_CLAC) => emulate_clac(vcpu),
        (OPCODE_GROUP_7, OPCODE_STAC) => emulate_stac(vcpu),
        _ => false,
    }
}
/// Emulate AMD SYSCALL semantics. Intel only honors SYSCALL in 64-bit
/// mode, while AMD also accepts it in compatibility mode, so a guest
/// that believes it is on an AMD CPU can hit #UD here on VT-x hardware.
fn emulate_syscall(vcpu: &mut VCpu) -> bool {
    if !vcpu.emulate_amd {
        return false;
    }

    // SYSCALL is 2 bytes (0F 05). Per the AMD definition, RCX receives
    // the return RIP and R11 the pre-syscall RFLAGS.
    let current_rip = unsafe { vmread(vmcs::guest::RIP).unwrap() };
    let return_address = current_rip + 2;
    let rflags = unsafe { vmread(vmcs::guest::RFLAGS).unwrap() };

    vcpu.guest_registers.rcx = return_address;
    vcpu.guest_registers.r11 = rflags;

    // 0xc0000082 = LSTAR (target RIP), 0xc0000081 = STAR (selectors),
    // 0xc0000084 = SFMASK (RFLAGS clear mask).
    let lstar = vcpu.guest_msr.find(0xc0000082).unwrap().data;
    let star = vcpu.guest_msr.find(0xc0000081).unwrap().data;
    let sfmask = vcpu.guest_msr.find(0xc0000084).unwrap().data;

    // Kernel CS comes from STAR[47:32]; SS is architecturally CS + 8.
    let cs_selector = (star >> 32) as u16;
    let ss_selector = cs_selector + 8;

    let cs_rights = {
        let mut rights = SegmentRights::default();
        rights.set_rw(true);
        rights.set_dc(false);
        rights.set_executable(true);
        rights.set_desc_type_raw(DescriptorType::Code as u8);
        rights.set_dpl(0);
        rights.set_granularity_raw(Granularity::KByte as u8);
        rights.set_long(true);
        rights.set_db(false);
        rights
    };
    let ss_rights = {
        let mut rights = SegmentRights::default();
        rights.set_rw(true);
        rights.set_dc(false);
        rights.set_executable(false);
        rights.set_desc_type_raw(DescriptorType::Code as u8);
        rights.set_dpl(0);
        rights.set_granularity_raw(Granularity::KByte as u8);
        rights.set_long(false);
        rights.set_db(true);
        rights
    };

    info!("Setting RIP:{:x} to {:x}", current_rip, lstar);

    unsafe {
        // Set segment registers for kernel mode
        vmwrite(vmcs::guest::RIP, lstar).unwrap();
        vmwrite(vmcs::guest::CS_SELECTOR, cs_selector as u64).unwrap();
        vmwrite(vmcs::guest::SS_SELECTOR, ss_selector as u64).unwrap();
        vmwrite(vmcs::guest::CS_BASE, 0).unwrap();
        vmwrite(vmcs::guest::SS_BASE, 0).unwrap();
        vmwrite(vmcs::guest::CS_LIMIT, 0xFFFFFFFF).unwrap();
        vmwrite(vmcs::guest::SS_LIMIT, 0xFFFFFFFF).unwrap();
        vmwrite(vmcs::guest::CS_ACCESS_RIGHTS, cs_rights.0 as u64).unwrap();
        vmwrite(vmcs::guest::SS_ACCESS_RIGHTS, ss_rights.0 as u64).unwrap();

        // Set CR0 and CR4 for Long Mode
        let mut cr0 = vmread(vmcs::guest::CR0).unwrap();
        cr0 |= (1 << 31) | (1 << 0); // Set PG (Paging) and PE (Protection Enable)
        vmwrite(vmcs::guest::CR0, cr0).unwrap();

        let mut cr4 = vmread(vmcs::guest::CR4).unwrap();
        cr4 |= 1 << 5; // Set PAE (Physical Address Extension)
        vmwrite(vmcs::guest::CR4, cr4).unwrap();

        // Set EFER for Long Mode
        let mut efer = vmread(vmcs::guest::IA32_EFER_FULL).unwrap();
        efer |= (1 << 8) | (1 << 10); // Set LME (Long Mode Enable) and LMA (Long Mode Active)
        vmwrite(vmcs::guest::IA32_EFER_FULL, efer).unwrap();

        // Set VM-Entry controls for Long Mode
        let mut entry_ctrls = EntryControls::read();
        entry_ctrls.set_ia32e_mode_guest(true);
        entry_ctrls.write();
    }

    // Apply SFMASK: every RFLAGS bit set in SFMASK is cleared.
    let new_rflags = rflags & !sfmask;
    unsafe {
        vmwrite(vmcs::guest::RFLAGS, new_rflags).unwrap();
    }

    true
}
fn emulate_clac(vcpu: &mut VCpu) -> bool {
    // CLAC clears RFLAGS.AC, then execution continues past the instruction.
    if modify_rflags_ac(false).is_err() {
        return false;
    }
    vcpu.step_next_inst().is_ok()
}

fn emulate_stac(vcpu: &mut VCpu) -> bool {
    // STAC sets RFLAGS.AC.
    if modify_rflags_ac(true).is_err() {
        return false;
    }
    vcpu.step_next_inst().is_ok()
}

fn modify_rflags_ac(set: bool) -> x86::vmx::Result<()> {
    unsafe {
        let rflags = vmread(vmcs::guest::RFLAGS)?;
        let new_rflags = if set {
            rflags | RFLAGS_AC_BIT
        } else {
            rflags & !RFLAGS_AC_BIT
        };
        vmwrite(vmcs::guest::RFLAGS, new_rflags)?;
    }
    Ok(())
}
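A quick sanity check of the STAR selector math in emulate_syscall above. The STAR value below is illustrative (a Linux-style layout with kernel CS 0x10), not taken from this PR:

    // Kernel CS lives in STAR[47:32]; SS is architecturally CS + 8.
    fn syscall_selectors(star: u64) -> (u16, u16) {
        let cs = (star >> 32) as u16;
        (cs, cs + 8)
    }

    fn main() {
        assert_eq!(syscall_selectors(0x0023_0010_0000_0000), (0x10, 0x18));
    }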

src/vmm/mod.rs

@@ -1,6 +1,7 @@
 pub mod asm;
 pub mod cpuid;
 pub mod cr;
+pub mod emulation;
 pub mod ept;
 pub mod error;
 pub mod fpu;

src/vmm/msr.rs

@@ -133,6 +133,13 @@ impl ShadowMsr
             }
             x86::msr::IA32_KERNEL_GSBASE => Self::shadow_read(vcpu, msr_kind),
             0x1b => Self::shadow_read(vcpu, msr_kind),
+            0x8b => Self::set_ret_val(vcpu, 0x8701021),
+            0xc0011029 => Self::set_ret_val(vcpu, 0x3000310e08202),
+            0xc0010000 => Self::set_ret_val(vcpu, 0x130076),
+            0xc0010001 => Self::set_ret_val(vcpu, 0),
+            0xc0010002 => Self::set_ret_val(vcpu, 0),
+            0xc0010003 => Self::set_ret_val(vcpu, 0),
+            0xc0010007 => Self::set_ret_val(vcpu, 0),
             _ => {
                 panic!("Unhandled RDMSR: {:#x}", msr_kind);
             }
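The new arms answer MSRs an AMD-aware guest probes: 0x8b is the microcode patch level (IA32_BIOS_SIGN_ID), 0xc0011029 appears to be AMD's DE_CFG, and 0xc0010000 through 0xc0010007 look like the legacy performance event-select/counter MSRs (PerfEvtSel0-3, PerfCtr0-3). A hypothetical sketch of what set_ret_val likely does (the real helper is defined elsewhere in this repo, and guest_registers exposing rax/rdx is an assumption): RDMSR returns its 64-bit value split across EDX:EAX.

    // Hypothetical shape of Self::set_ret_val, for orientation only.
    fn set_ret_val(vcpu: &mut VCpu, value: u64) {
        vcpu.guest_registers.rax = value & 0xffff_ffff; // low half -> EAX
        vcpu.guest_registers.rdx = value >> 32; // high half -> EDX
    }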
@@ -172,6 +179,7 @@ impl ShadowMsr
             x86::msr::IA32_FS_BASE => unsafe { vmwrite(vmcs::guest::FS_BASE, value).unwrap() },
             x86::msr::IA32_GS_BASE => unsafe { vmwrite(vmcs::guest::GS_BASE, value).unwrap() },
             0x1b => Self::shadow_write(vcpu, msr_kind),
+            0xc0010007 => Self::shadow_write(vcpu, msr_kind),
             _ => {
                 panic!("Unhandled WRMSR: {:#x}", msr_kind);
src/vmm/vcpu.rs

@@ -26,7 +26,9 @@ use crate::{
     memory::BootInfoFrameAllocator,
     subscribe_with_context,
     vmm::{
-        cpuid, cr, fpu,
+        cpuid, cr,
+        emulation::opcode::{emulate_opcode, OpcodeEmulator},
+        fpu,
         io::{self, InitPhase, Serial, PIC},
         msr,
         qual::{QualCr, QualIo},
@@ -49,8 +51,10 @@ use super::{
 };

 const SIZE_2MIB: u64 = 2 * 1024 * 1024;
+const GUEST_MEMORY_SIZE: u64 = 2 * 1024 * 1024 * 1024;

-static EPT_FRAME_ALLOCATOR: AtomicPtr<BootInfoFrameAllocator> = AtomicPtr::new(core::ptr::null_mut());
+static EPT_FRAME_ALLOCATOR: AtomicPtr<BootInfoFrameAllocator> =
+    AtomicPtr::new(core::ptr::null_mut());

 #[repr(C)]
 pub struct VCpu {
@@ -71,6 +75,8 @@ pub struct VCpu {
     pub io_bitmap_b: x86_64::structures::paging::PhysFrame,
     pub pic: PIC,
     pub pending_irq: u16,
+    pub opcode_emulator: OpcodeEmulator,
+    pub emulate_amd: bool,
 }

 const TEMP_STACK_SIZE: usize = 4096;
@@ -184,6 +190,8 @@ impl VCpu {
             io_bitmap_b,
             pic: PIC::new(),
             pending_irq: 0,
+            opcode_emulator: OpcodeEmulator::new(),
+            emulate_amd: false,
         }
     }
@@ -193,7 +201,7 @@ impl VCpu {
         mapper: &OffsetPageTable<'static>,
     ) {
         EPT_FRAME_ALLOCATOR.store(frame_allocator as *mut _, Ordering::Release);

         self.vmxon.activate_vmxon().unwrap();
         let revision_id = unsafe { rdmsr(x86::msr::IA32_VMX_BASIC) } as u32;
@@ -241,7 +249,7 @@ impl VCpu {
         let cmdline_start = linux::LAYOUT_CMDLINE as u64;
         let cmdline_end = cmdline_start + cmdline_max_size as u64;
         let cmdline_bytes = b"console=ttyS0 earlyprintk=serial nokaslr\0";

         self.load_image(cmdline_bytes, cmdline_start as usize);
@@ -272,21 +280,23 @@ impl VCpu {
     pub fn load_image(&mut self, image: &[u8], addr: usize) {
         info!("Loading image at {:#x}, size: {} bytes", addr, image.len());

         let start_page = addr & !0xFFF;
         let end_page = ((addr + image.len() - 1) & !0xFFF) + 0x1000;

         unsafe {
             let frame_allocator_ptr = EPT_FRAME_ALLOCATOR.load(Ordering::Acquire);
             if !frame_allocator_ptr.is_null() {
                 let frame_allocator = &mut *(frame_allocator_ptr as *mut BootInfoFrameAllocator);
                 let mut current_page = start_page;
                 while current_page < end_page {
                     if self.ept.get_phys_addr(current_page as u64).is_none() {
                         if let Some(frame) = frame_allocator.allocate_frame() {
                             let hpa = frame.start_address().as_u64();
-                            self.ept.map_4k(current_page as u64, hpa, frame_allocator).unwrap();
+                            self.ept
+                                .map_4k(current_page as u64, hpa, frame_allocator)
+                                .unwrap();
                         } else {
                             panic!("Failed to allocate frame for image at {:#x}", current_page);
                         }
@@ -295,7 +305,7 @@ impl VCpu {
                 }
             }
         }

         for (i, &byte) in image.iter().enumerate() {
             let gpa = addr + i;
             self.ept.set(gpa as u64, byte).unwrap();
@@ -303,17 +313,17 @@ impl VCpu {
     }

     pub fn setup_guest_memory(&mut self, frame_allocator: &mut BootInfoFrameAllocator) -> u64 {
-        let guest_memory_size = 2 * 1024 * 1024 * 1024;
-
-        info!("Setting up guest memory with on-demand allocation (reported size: {}MB)",
-            guest_memory_size / (1024 * 1024));
-
-        self.load_kernel(linux::BZIMAGE, guest_memory_size);
+        info!(
+            "Setting up guest memory with on-demand allocation (reported size: {}MB)",
+            GUEST_MEMORY_SIZE / (1024 * 1024)
+        );
+
+        self.load_kernel(linux::BZIMAGE, GUEST_MEMORY_SIZE);

         let eptp = EPTP::new(&self.ept.root_table);
         unsafe { vmwrite(vmcs::control::EPTP_FULL, eptp.0).unwrap() };
-        guest_memory_size
+        GUEST_MEMORY_SIZE
     }

     pub fn register_msrs(&mut self, mapper: &OffsetPageTable<'static>) {
@@ -354,6 +364,7 @@ impl VCpu {
             .set(x86::msr::MSR_C5_PMON_BOX_CTRL, 0)
             .unwrap();
         self.guest_msr.set(0x1b, 0).unwrap();
+        self.guest_msr.set(0xc0010007, 0).unwrap();

         vmwrite(
             vmcs::control::VMEXIT_MSR_LOAD_ADDR_FULL,
@@ -969,7 +980,7 @@ impl VCpu {
             vmwrite(vmcs::host::RSP, rsp).unwrap();
         }
     }

-    fn step_next_inst(&mut self) -> Result<(), VmFail> {
+    pub fn step_next_inst(&mut self) -> Result<(), VmFail> {
         unsafe {
             let rip = vmread(vmcs::guest::RIP)?;
             vmwrite(
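The hunk cuts off mid-statement (only the visibility changed, so the body continues unchanged past the cut). For orientation, the usual shape of such a helper is to advance RIP by the VM-exit instruction length; a sketch under the assumption that the truncated vmwrite targets vmcs::guest::RIP and uses the x86 crate's vmcs::ro::VMEXIT_INSTRUCTION_LEN field:

    // Assumed continuation, not the verbatim body from this repo.
    fn step_next_inst_sketch() -> Result<(), x86::vmx::VmFail> {
        unsafe {
            let rip = vmread(vmcs::guest::RIP)?;
            let len = vmread(vmcs::ro::VMEXIT_INSTRUCTION_LEN)?;
            vmwrite(vmcs::guest::RIP, rip + len)?; // skip the exiting instruction
        }
        Ok(())
    }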
@@ -980,8 +991,11 @@ impl VCpu {
     }

     fn handle_ept_violation(&mut self, gpa: u64) {
-        if gpa >= 2 * 1024 * 1024 * 1024 {
-            panic!("EPT Violation: Guest tried to access memory beyond 2GB at {:#x}", gpa);
+        if gpa >= GUEST_MEMORY_SIZE {
+            panic!(
+                "EPT Violation: Guest tried to access memory beyond 2GB at {:#x}",
+                gpa
+            );
         }

         unsafe {
@@ -989,19 +1003,22 @@ impl VCpu {
             if frame_allocator_ptr.is_null() {
                 panic!("EPT Violation: Frame allocator not initialized!");
             }

             let frame_allocator = &mut *(frame_allocator_ptr as *mut BootInfoFrameAllocator);

             match frame_allocator.allocate_frame() {
                 Some(frame) => {
                     let hpa = frame.start_address().as_u64();
                     if let Err(e) = self.ept.map_4k(gpa, hpa, frame_allocator) {
                         panic!("Failed to map page at GPA {:#x}: {}", gpa, e);
                     }
                 }
                 None => {
-                    panic!("EPT Violation: Out of memory! Cannot allocate frame for GPA {:#x}", gpa);
+                    panic!(
+                        "EPT Violation: Out of memory! Cannot allocate frame for GPA {:#x}",
+                        gpa
+                    );
                 }
             }
         }
@@ -1146,38 +1163,12 @@ impl VCpu {
         }

         if valid_bytes > 0 {
-            match instruction_bytes[0] {
-                0x0F => {
-                    if valid_bytes > 1 {
-                        match instruction_bytes[1] {
-                            0x01 => match instruction_bytes[2] {
-                                0xCA => {
-                                    unsafe {
-                                        let rflags = vmread(vmcs::guest::RFLAGS).unwrap();
-                                        vmwrite(vmcs::guest::RFLAGS, rflags & !(1 << 18)).unwrap();
-                                    }
-                                    self.step_next_inst().unwrap();
-                                }
-                                0xCB => {
-                                    unsafe {
-                                        let rflags = vmread(vmcs::guest::RFLAGS).unwrap();
-                                        vmwrite(vmcs::guest::RFLAGS, rflags | (1 << 18)).unwrap();
-                                    }
-                                    self.step_next_inst().unwrap();
-                                }
-                                _ => {
-                                    self.inject_exception(vector, error_code).unwrap();
-                                }
-                            },
-                            _ => {
-                                self.inject_exception(vector, error_code).unwrap();
-                            }
-                        }
-                    }
-                }
-                _ => {
-                    self.inject_exception(vector, error_code).unwrap();
-                }
-            }
+            if !emulate_opcode(self, instruction_bytes, valid_bytes) {
+                info!(
+                    "VMExit: Exception {} at RIP {:#x} with instruction bytes: {:?}",
+                    vector, rip, instruction_bytes
+                );
+                self.inject_exception(vector, error_code).unwrap();
+            }
         }
     }
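With this change the old inline 0F 01 CA/CB handling moves into emulation/opcode.rs, and anything the emulator declines is logged and re-injected into the guest. The only guest state the CLAC/STAC paths touch is RFLAGS bit 18 (plus the RIP advance); a standalone check of that effect:

    const RFLAGS_AC_BIT: u64 = 1 << 18;

    fn apply_ac(rflags: u64, set: bool) -> u64 {
        if set {
            rflags | RFLAGS_AC_BIT
        } else {
            rflags & !RFLAGS_AC_BIT
        }
    }

    fn main() {
        assert_eq!(apply_ac(0x2, true), 0x4_0002); // STAC sets AC
        assert_eq!(apply_ac(0x4_0002, false), 0x2); // CLAC clears AC
    }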
@@ -1215,7 +1206,7 @@ impl VCpu {
                 let translation_valid = (exit_qualification & 0x100) != 0;

                 let page_addr = guest_address & !0xFFF;
                 self.handle_ept_violation(page_addr);
             }
             _ => {