diff --git a/fuzz/fuzz_targets/insn.rs b/fuzz/fuzz_targets/insn.rs index 327793c21..9cb3361ca 100644 --- a/fuzz/fuzz_targets/insn.rs +++ b/fuzz/fuzz_targets/insn.rs @@ -12,7 +12,13 @@ fuzz_target!(|input: &[u8]| -> Corpus { data.copy_from_slice(input); let insn = Instruction::new(data); - let _ = core::hint::black_box(insn.decode(&TestCtx)); + let _ = core::hint::black_box({ + let mut ctx = TestCtx::default(); + match insn.decode(&ctx) { + Ok(insn_ctx) => insn_ctx.emulate(&mut ctx), + Err(e) => Err(e), + } + }); Corpus::Keep }); diff --git a/kernel/src/cpu/efer.rs b/kernel/src/cpu/efer.rs index 2b5cd3bd6..fa66c2d92 100644 --- a/kernel/src/cpu/efer.rs +++ b/kernel/src/cpu/efer.rs @@ -10,6 +10,7 @@ use crate::platform::SvsmPlatform; use bitflags::bitflags; bitflags! { + #[derive(Clone, Copy, Debug)] pub struct EFERFlags: u64 { const SCE = 1 << 0; // System Call Extensions const LME = 1 << 8; // Long Mode Enable diff --git a/kernel/src/cpu/idt/common.rs b/kernel/src/cpu/idt/common.rs index b85e70efc..71fd3d6dd 100644 --- a/kernel/src/cpu/idt/common.rs +++ b/kernel/src/cpu/idt/common.rs @@ -4,14 +4,19 @@ // // Author: Joerg Roedel +extern crate alloc; + use crate::address::{Address, VirtAddr}; use crate::cpu::control_regs::{read_cr0, read_cr4}; use crate::cpu::efer::read_efer; use crate::cpu::gdt::gdt; use crate::cpu::registers::{X86GeneralRegs, X86InterruptFrame}; -use crate::insn_decode::{InsnMachineCtx, SegRegister}; +use crate::insn_decode::{InsnError, InsnMachineCtx, InsnMachineMem, Register, SegRegister}; use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard}; -use crate::types::SVSM_CS; +use crate::mm::GuestPtr; +use crate::platform::SVSM_PLATFORM; +use crate::types::{Bytes, SVSM_CS}; +use alloc::boxed::Box; use core::arch::{asm, global_asm}; use core::mem; use core::ptr::addr_of; @@ -44,6 +49,18 @@ pub const PF_ERROR_WRITE: usize = 2; pub const INT_INJ_VECTOR: usize = 0x50; +bitflags::bitflags! { + /// Page fault error code flags. 
+ #[derive(Clone, Copy, Debug, PartialEq)] + pub struct PageFaultError :u32 { + const P = 1 << 0; + const W = 1 << 1; + const U = 1 << 2; + const R = 1 << 3; + const I = 1 << 4; + } +} + #[repr(C, packed)] #[derive(Default, Debug, Clone, Copy)] pub struct X86ExceptionContext { @@ -71,6 +88,98 @@ impl InsnMachineCtx for X86ExceptionContext { fn read_cr4(&self) -> u64 { read_cr4().bits() } + + fn read_reg(&self, reg: Register) -> usize { + match reg { + Register::Rax => self.regs.rax, + Register::Rdx => self.regs.rdx, + Register::Rcx => self.regs.rcx, + Register::Rbx => self.regs.rbx, + Register::Rsp => self.frame.rsp, + Register::Rbp => self.regs.rbp, + Register::Rdi => self.regs.rdi, + Register::Rsi => self.regs.rsi, + Register::R8 => self.regs.r8, + Register::R9 => self.regs.r9, + Register::R10 => self.regs.r10, + Register::R11 => self.regs.r11, + Register::R12 => self.regs.r12, + Register::R13 => self.regs.r13, + Register::R14 => self.regs.r14, + Register::R15 => self.regs.r15, + Register::Rip => self.frame.rip, + } + } + + fn read_flags(&self) -> usize { + self.frame.flags + } + + fn write_reg(&mut self, reg: Register, val: usize) { + match reg { + Register::Rax => self.regs.rax = val, + Register::Rdx => self.regs.rdx = val, + Register::Rcx => self.regs.rcx = val, + Register::Rbx => self.regs.rbx = val, + Register::Rsp => self.frame.rsp = val, + Register::Rbp => self.regs.rbp = val, + Register::Rdi => self.regs.rdi = val, + Register::Rsi => self.regs.rsi = val, + Register::R8 => self.regs.r8 = val, + Register::R9 => self.regs.r9 = val, + Register::R10 => self.regs.r10 = val, + Register::R11 => self.regs.r11 = val, + Register::R12 => self.regs.r12 = val, + Register::R13 => self.regs.r13 = val, + Register::R14 => self.regs.r14 = val, + Register::R15 => self.regs.r15 = val, + Register::Rip => self.frame.rip = val, + } + } + + fn read_cpl(&self) -> usize { + self.frame.cs & 3 + } + + fn map_linear_addr( + &self, + la: usize, + _write: bool, + _fetch: bool, + ) -> 
Result>, InsnError> { + if user_mode(self) { + todo!(); + } else { + Ok(Box::new(GuestPtr::::new(VirtAddr::from(la)))) + } + } + + fn ioio_perm(&self, _port: u16, _size: Bytes, _io_read: bool) -> bool { + // Check if the IO port can be supported by user mode + todo!(); + } + + fn ioio_in(&self, port: u16, size: Bytes) -> Result { + let io_port = SVSM_PLATFORM.as_dyn_ref().get_io_port(); + let data = match size { + Bytes::One => io_port.inb(port) as u64, + Bytes::Two => io_port.inw(port) as u64, + Bytes::Four => io_port.inl(port) as u64, + _ => return Err(InsnError::IoIoIn), + }; + Ok(data) + } + + fn ioio_out(&mut self, port: u16, size: Bytes, data: u64) -> Result<(), InsnError> { + let io_port = SVSM_PLATFORM.as_dyn_ref().get_io_port(); + match size { + Bytes::One => io_port.outb(port, data as u8), + Bytes::Two => io_port.outw(port, data as u16), + Bytes::Four => io_port.outl(port, data as u32), + _ => return Err(InsnError::IoIoOut), + } + Ok(()) + } } pub fn user_mode(ctxt: &X86ExceptionContext) -> bool { diff --git a/kernel/src/cpu/registers.rs b/kernel/src/cpu/registers.rs index 5bfe3a48a..a5aeb3a3e 100644 --- a/kernel/src/cpu/registers.rs +++ b/kernel/src/cpu/registers.rs @@ -61,3 +61,28 @@ bitflags! { const G = 1 << 55; } } + +bitflags! 
{ + #[derive(Clone, Copy, Debug)] + pub struct RFlags: usize { + const CF = 1 << 0; + const FIXED = 1 << 1; + const PF = 1 << 2; + const AF = 1 << 4; + const ZF = 1 << 6; + const SF = 1 << 7; + const TF = 1 << 8; + const IF = 1 << 9; + const DF = 1 << 10; + const OF = 1 << 11; + const IOPL = 3 << 12; + const NT = 1 << 14; + const MD = 1 << 15; + const RF = 1 << 16; + const VM = 1 << 17; + const AC = 1 << 18; + const VIF = 1 << 19; + const VIP = 1 << 20; + const ID = 1 << 21; + } +} diff --git a/kernel/src/cpu/vc.rs b/kernel/src/cpu/vc.rs index e5587a211..f150142e1 100644 --- a/kernel/src/cpu/vc.rs +++ b/kernel/src/cpu/vc.rs @@ -134,7 +134,7 @@ pub fn handle_vc_exception(ctx: &mut X86ExceptionContext, vector: usize) -> Resu let insn_ctx = vc_decode_insn(ctx)?; - match (error_code, insn_ctx.and_then(|d| d.insn())) { + match (error_code, insn_ctx.as_ref().and_then(|d| d.insn())) { // If the gdb stub is enabled then debugging operations such as single stepping // will cause either an exception via DB_VECTOR if the DEBUG_SWAP sev_feature is // clear, or a VC exception with an error code of X86_TRAP if set. 
@@ -143,7 +143,11 @@ pub fn handle_vc_exception(ctx: &mut X86ExceptionContext, vector: usize) -> Resu Ok(()) } (SVM_EXIT_CPUID, Some(DecodedInsn::Cpuid)) => handle_cpuid(ctx), - (SVM_EXIT_IOIO, Some(ins)) => handle_ioio(ctx, ghcb, ins), + (SVM_EXIT_IOIO, Some(_)) => insn_ctx + .as_ref() + .unwrap() + .emulate(ctx) + .map_err(SvsmError::from), (SVM_EXIT_MSR, Some(ins)) => handle_msr(ctx, ghcb, ins), (SVM_EXIT_RDTSC, Some(DecodedInsn::Rdtsc)) => ghcb.rdtsc_regs(&mut ctx.regs), (SVM_EXIT_RDTSCP, Some(DecodedInsn::Rdtsc)) => ghcb.rdtscp_regs(&mut ctx.regs), @@ -225,7 +229,7 @@ fn snp_cpuid(ctx: &mut X86ExceptionContext) -> Result<(), SvsmError> { } fn vc_finish_insn(ctx: &mut X86ExceptionContext, insn_ctx: &Option) { - ctx.frame.rip += insn_ctx.map_or(0, |d| d.size()) + ctx.frame.rip += insn_ctx.as_ref().map_or(0, |d| d.size()) } fn ioio_get_port(source: Operand, ctx: &X86ExceptionContext) -> u16 { @@ -415,7 +419,13 @@ mod tests { fn rep_outsw(port: u16, data: &[u16]) { unsafe { - asm!("rep outsw", in("dx") port, in("rsi") data.as_ptr(), in("rcx") data.len(), options(att_syntax)) + asm!("rep outsw", in("dx") port, in("rsi") data.as_ptr(), inout("rcx") data.len() => _, options(att_syntax)) + } + } + + fn rep_insw(port: u16, data: &mut [u16]) { + unsafe { + asm!("rep insw", in("dx") port, in("rdi") data.as_ptr(), inout("rcx") data.len() => _, options(att_syntax)) } } @@ -477,8 +487,7 @@ mod tests { } #[test] - // #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")] - #[ignore = "Currently unhandled by #VC handler"] + #[cfg_attr(not(test_in_svsm), ignore = "Can only be run inside guest")] fn test_port_io_string_16_get_last() { const TEST_DATA: &[u16] = &[0x1234, 0x5678, 0x9abc, 0xdef0]; verify_ghcb_gets_altered(|| rep_outsw(TESTDEV_ECHO_LAST_PORT, TEST_DATA)); @@ -486,6 +495,12 @@ mod tests { TEST_DATA.last().unwrap(), &verify_ghcb_gets_altered(|| inw(TESTDEV_ECHO_LAST_PORT)) ); + + let mut test_data: [u16; 4] = [0; 4]; + verify_ghcb_gets_altered(|| 
rep_insw(TESTDEV_ECHO_LAST_PORT, &mut test_data)); + for d in test_data.iter() { + assert_eq!(d, TEST_DATA.last().unwrap()); + } } #[test] diff --git a/kernel/src/insn_decode/decode.rs b/kernel/src/insn_decode/decode.rs index 1f30f8f94..4b9f9c47e 100644 --- a/kernel/src/insn_decode/decode.rs +++ b/kernel/src/insn_decode/decode.rs @@ -39,13 +39,16 @@ // https://github.com/projectacrn/acrn-hypervisor/blob/master/hypervisor/ // arch/x86/guest/instr_emul.c +extern crate alloc; + use super::insn::{DecodedInsn, Immediate, Operand, MAX_INSN_SIZE}; use super::opcode::{OpCodeClass, OpCodeDesc, OpCodeFlags}; use super::{InsnError, Register, SegRegister}; use crate::cpu::control_regs::{CR0Flags, CR4Flags}; use crate::cpu::efer::EFERFlags; -use crate::cpu::registers::SegDescAttrFlags; +use crate::cpu::registers::{RFlags, SegDescAttrFlags}; use crate::types::Bytes; +use alloc::boxed::Box; use bitflags::bitflags; /// Represents the raw bytes of an instruction and @@ -143,6 +146,131 @@ pub trait InsnMachineCtx: core::fmt::Debug { fn read_cr0(&self) -> u64; /// Read CR4 register fn read_cr4(&self) -> u64; + + /// Read a register + fn read_reg(&self, _reg: Register) -> usize { + unimplemented!("Reading register is not implemented"); + } + + /// Read rflags register + fn read_flags(&self) -> usize { + unimplemented!("Reading flags is not implemented"); + } + + /// Write a register + fn write_reg(&mut self, _reg: Register, _val: usize) { + unimplemented!("Writing register is not implemented"); + } + + /// Read the current privilege level + fn read_cpl(&self) -> usize { + unimplemented!("Reading CPL is not implemented"); + } + + /// Map the given linear address region to a machine memory object + /// which provides access to the memory of this linear address region. + /// + /// # Arguments + /// + /// * `la` - The linear address of the region to map. + /// * `write` - Whether write access is allowed to the mapped region. 
+ /// * `fetch` - Whether fetch access is allowed to the mapped region. + /// + /// # Returns + /// + /// A `Result` containing a boxed trait object representing the mapped + /// memory, or an `InsnError` if mapping fails. + fn map_linear_addr( + &self, + _la: usize, + _write: bool, + _fetch: bool, + ) -> Result>, InsnError> { + Err(InsnError::MapLinearAddr) + } + + /// Check IO permission bitmap. + /// + /// # Arguments + /// + /// * `port` - The I/O port to check. + /// * `size` - The size of the I/O operation. + /// * `io_read` - Whether the I/O operation is a read operation. + /// + /// # Returns + /// + /// A `Result` containing true if the port is permitted otherwise false. + fn ioio_perm(&self, _port: u16, _size: Bytes, _io_read: bool) -> bool { + unimplemented!("Checking IO permission bitmap is not implemented"); + } + + /// Handle an I/O in operation. + /// + /// # Arguments + /// + /// * `port` - The I/O port to read from. + /// * `size` - The size of the data to read. + /// + /// # Returns + /// + /// A `Result` containing the read data if success or an `InsnError` if + /// the operation fails. + fn ioio_in(&self, _port: u16, _size: Bytes) -> Result { + Err(InsnError::IoIoIn) + } + + /// Handle an I/O out operation. + /// + /// # Arguments + /// + /// * `port` - The I/O port to write to. + /// * `size` - The size of the data to write. + /// * `data` - The data to write to the I/O port. + /// + /// # Returns + /// + /// A `Result` indicating success or an `InsnError` if the operation fails. + fn ioio_out(&mut self, _port: u16, _size: Bytes, _data: u64) -> Result<(), InsnError> { + Err(InsnError::IoIoOut) + } +} + +/// Trait representing a machine memory for instruction decoding. +pub trait InsnMachineMem { + type Item; + + /// Read data from the memory at the specified offset. + /// + /// # Safety + /// + /// The caller must verify not to read data from arbitrary memory. 
The object implements this + /// trait should guarantee the memory region is readable. + /// + /// # Returns + /// + /// Returns the read data on success, or an `InsnError` if the read + /// operation fails. + unsafe fn mem_read(&self) -> Result { + Err(InsnError::MemRead) + } + + /// Write data to the memory at the specified offset. + /// + /// # Safety + /// + /// The caller must verify not to write data to corrupt arbitrary memory. The object implements + /// this trait should guarantee the memory region is writable. + /// + /// # Arguments + /// + /// * `data` - The data to write to the memory. + /// + /// # Returns + /// + /// Returns `Ok`on success, or an `InsnError` if the write operation fails. + unsafe fn mem_write(&mut self, _data: Self::Item) -> Result<(), InsnError> { + Err(InsnError::MemWrite) + } } #[derive(Clone, Copy, Debug, PartialEq)] @@ -338,6 +466,63 @@ impl Sib { } } +#[inline] +fn read_reg(mctx: &I, reg: Register, size: Bytes) -> usize { + mctx.read_reg(reg) & size.mask() as usize +} + +#[inline] +fn write_reg(mctx: &mut I, reg: Register, data: usize, size: Bytes) { + mctx.write_reg( + reg, + match size { + Bytes::Zero => return, + // Writing 8bit or 16bit register will not affect the upper bits. + Bytes::One | Bytes::Two => { + let old = mctx.read_reg(reg); + (data & size.mask() as usize) | (old & !size.mask() as usize) + } + // Writing 32bit register will zero out the upper bits. 
+ Bytes::Four => data & size.mask() as usize, + Bytes::Eight => data, + }, + ); +} + +#[inline] +fn segment_base(segment: u64) -> u32 { + // Segment base bits 0 ~ 23: raw value bits 16 ~ 39 + // Segment base bits 24 ~ 31: raw value bits 56 ~ 63 + (((segment >> 16) & 0xffffff) | ((segment >> 56) << 24)) as u32 +} + +#[inline] +fn segment_limit(segment: u64) -> u32 { + // Segment limit bits 0 ~ 15: raw value bits 0 ~ 15 + // Segment limit bits 16 ~ 19: raw value bits 48 ~ 51 + let limit = ((segment & 0xffff) | ((segment >> 32) & 0xf0000)) as u32; + + if SegDescAttrFlags::from_bits_truncate(segment).contains(SegDescAttrFlags::G) { + (limit << 12) | 0xfff + } else { + limit + } +} + +fn ioio_perm(mctx: &I, port: u16, size: Bytes, io_read: bool) -> bool { + if mctx.read_cr0() & CR0Flags::PE.bits() != 0 + && (mctx.read_cpl() > ((mctx.read_flags() >> 12) & 3) + || mctx.read_cr4() & CR4Flags::VME.bits() != 0) + { + // In protected mode with CPL > IOPL or virtual-8086 mode, if + // any I/O Permission Bit for I/O port being accessed = 1, the I/O + // operation is not allowed. + mctx.ioio_perm(port, size, io_read) + } else { + true + } +} + /// Represents the context of a decoded instruction, which is used to /// interpret the instruction. It holds the decoded instruction, its /// length and various components that are decoded from the instruction @@ -373,6 +558,9 @@ pub struct DecodedInsnCtx { // Optional immediate operand immediate: i64, + + // Instruction repeat count + repeat: usize, } impl DecodedInsnCtx { @@ -415,9 +603,39 @@ impl DecodedInsnCtx { /// /// # Returns /// - /// The length of the decoded instruction as a `usize`. + /// The length of the decoded instruction as a `usize`. If the + /// repeat count is greater than 1, then return 0 to indicate not to + /// skip this instruction. If the repeat count is less than 1, then + /// return instruction len to indicate this instruction can be skipped. 
pub fn size(&self) -> usize { - self.insn_len + if self.repeat > 1 { + 0 + } else { + self.insn_len + } + } + + /// Emulates the decoded instruction using the provided machine context. + /// + /// # Arguments + /// + /// * `mctx` - A mutable reference to an object implementing the + /// `InsnMachineCtx` trait to provide the necessary machine context + /// for emulation. + /// + /// # Returns + /// + /// An `Ok(())` if emulation is successful or an `InsnError` otherwise. + pub fn emulate(&self, mctx: &mut I) -> Result<(), InsnError> { + self.insn + .ok_or(InsnError::UnSupportedInsn) + .and_then(|insn| match insn { + DecodedInsn::In(port, opsize) => self.emulate_in_out(port, opsize, mctx, true), + DecodedInsn::Out(port, opsize) => self.emulate_in_out(port, opsize, mctx, false), + DecodedInsn::Ins => self.emulate_ins_outs(mctx, true), + DecodedInsn::Outs => self.emulate_ins_outs(mctx, false), + _ => Err(InsnError::UnSupportedInsn), + }) } fn decode( @@ -431,7 +649,7 @@ impl DecodedInsnCtx { .and_then(|(insn, disp_bytes)| self.decode_displacement(insn, disp_bytes)) .and_then(|insn| self.decode_immediate(insn)) .and_then(|insn| self.decode_moffset(insn)) - .and_then(|insn| self.complete_decode(insn)) + .and_then(|insn| self.complete_decode(insn, mctx)) } #[inline] @@ -767,13 +985,17 @@ impl DecodedInsnCtx { } } - fn complete_decode(&mut self, insn: DecodedBytes) -> Result<(), InsnError> { + fn complete_decode( + &mut self, + insn: DecodedBytes, + mctx: &I, + ) -> Result<(), InsnError> { self.insn_len = insn.0.processed(); - self.decoded_insn() + self.decoded_insn(mctx) .map(|decoded_insn| self.insn = Some(decoded_insn)) } - fn decoded_insn(&self) -> Result { + fn decoded_insn(&mut self, mctx: &I) -> Result { let opdesc = self.get_opdesc()?; Ok(match opdesc.class { OpCodeClass::Cpuid => DecodedInsn::Cpuid, @@ -797,6 +1019,20 @@ impl DecodedInsnCtx { DecodedInsn::Out(Operand::rdx(), self.opsize) } } + OpCodeClass::Ins | OpCodeClass::Outs => { + if 
self.prefix.contains(PrefixFlags::REPZ_P) { + // The prefix REPZ(F3h) actually represents REP for ins/outs. + // The count register is depending on the address size of the + // instruction. + self.repeat = read_reg(mctx, Register::Rcx, self.addrsize); + }; + + if opdesc.class == OpCodeClass::Ins { + DecodedInsn::Ins + } else { + DecodedInsn::Outs + } + } OpCodeClass::Rdmsr => DecodedInsn::Rdmsr, OpCodeClass::Rdtsc => DecodedInsn::Rdtsc, OpCodeClass::Rdtscp => DecodedInsn::Rdtscp, @@ -804,4 +1040,278 @@ impl DecodedInsnCtx { _ => return Err(InsnError::UnSupportedInsn), }) } + + fn canonical_check(&self, la: usize) -> Option { + if match self.cpu_mode { + CpuMode::Bit64(level) => { + let virtaddr_bits = if level == PagingLevel::Level4 { 48 } else { 57 }; + let mask = !((1 << virtaddr_bits) - 1); + if la & (1 << (virtaddr_bits - 1)) != 0 { + la & mask == mask + } else { + la & mask == 0 + } + } + _ => true, + } { + Some(la) + } else { + None + } + } + + fn alignment_check(&self, la: usize, size: Bytes) -> Option { + match size { + // Zero size is not allowed + Bytes::Zero => None, + // One byte is always aligned + Bytes::One => Some(la), + // Two/Four/Eight bytes must be aligned on a boundary + _ => { + if la & (size as usize - 1) != 0 { + None + } else { + Some(la) + } + } + } + } + + fn cal_linear_addr( + &self, + mctx: &I, + seg: SegRegister, + ea: usize, + writable: bool, + ) -> Option { + let segment = mctx.read_seg(seg); + + let addrsize = if self.cpu_mode.is_bit64() { + Bytes::Eight + } else { + let attr = SegDescAttrFlags::from_bits_truncate(segment); + // Invalid if is system segment + if !attr.contains(SegDescAttrFlags::S) { + return None; + } + + if writable { + // Writing to a code segment, or writing to a read-only + // data segment is not allowed. + if attr.contains(SegDescAttrFlags::C_D) || !attr.contains(SegDescAttrFlags::R_W) { + return None; + } + } else { + // Data segment is always read-able, but code segment + // may be execute only. 
Invalid if read an execute only + // code segment. + if attr.contains(SegDescAttrFlags::C_D) && !attr.contains(SegDescAttrFlags::R_W) { + return None; + } + } + + let mut limit = segment_limit(segment) as usize; + + if !attr.contains(SegDescAttrFlags::C_D) && attr.contains(SegDescAttrFlags::C_E) { + // Expand-down segment, check low limit + if ea <= limit { + return None; + } + + limit = if attr.contains(SegDescAttrFlags::DB) { + u32::MAX as usize + } else { + u16::MAX as usize + } + } + + // Check high limit for each byte + for i in 0..self.opsize as usize { + if ea + i > limit { + return None; + } + } + + Bytes::Four + }; + + self.canonical_check( + if self.cpu_mode.is_bit64() && seg != SegRegister::FS && seg != SegRegister::GS { + ea & (addrsize.mask() as usize) + } else { + (segment_base(segment) as usize + ea) & addrsize.mask() as usize + }, + ) + } + + fn get_linear_addr( + &self, + mctx: &I, + seg: SegRegister, + ea: usize, + writable: bool, + ) -> Result { + self.cal_linear_addr(mctx, seg, ea, writable) + .ok_or(if seg == SegRegister::SS { + InsnError::ExceptionSS + } else { + InsnError::ExceptionGP(0) + }) + .and_then(|la| { + if (mctx.read_cpl() == 3) + && (mctx.read_cr0() & CR0Flags::AM.bits()) != 0 + && (mctx.read_flags() & RFlags::AC.bits()) != 0 + { + self.alignment_check(la, self.opsize) + .ok_or(InsnError::ExceptionAC) + } else { + Ok(la) + } + }) + } + + fn emulate_ins_outs( + &self, + mctx: &mut I, + io_read: bool, + ) -> Result<(), InsnError> { + // I/O port number is stored in DX. + let port = mctx.read_reg(Register::Rdx) as u16; + + // Check the IO permission bit map. + if !ioio_perm(mctx, port, self.opsize, io_read) { + return Err(InsnError::ExceptionGP(0)); + } + + let (seg, reg) = if io_read { + // Input byte from I/O port specified in DX into + // memory location specified with ES:(E)DI or + // RDI. 
+ (SegRegister::ES, Register::Rdi) + } else { + // Output byte/word/doubleword from memory location specified in + // DS:(E)SI (The DS segment may be overridden with a segment + // override prefix.) or RSI to I/O port specified in DX. + ( + self.override_seg.map_or(SegRegister::DS, |s| s), + Register::Rsi, + ) + }; + + // Decoed the linear addresses and map as a memory object + // which allows accessing to the memory represented by the + // linear addresses. + let linear_addr = + self.get_linear_addr(mctx, seg, read_reg(mctx, reg, self.addrsize), io_read)?; + if io_read { + // Read data from IO port and then write to the memory location. + let data = mctx.ioio_in(port, self.opsize)?; + // Safety: The linear address is decoded from the instruction and checked. It can be + // remapped to a memory object with the write permission successfully, and the remapped + // memory size matches the operand size of the instruction. + unsafe { + match self.opsize { + Bytes::One => mctx + .map_linear_addr::(linear_addr, io_read, false)? + .mem_write(data as u8)?, + Bytes::Two => mctx + .map_linear_addr::(linear_addr, io_read, false)? + .mem_write(data as u16)?, + Bytes::Four => mctx + .map_linear_addr::(linear_addr, io_read, false)? + .mem_write(data as u32)?, + _ => return Err(InsnError::IoIoIn), + }; + } + } else { + // Read data from memory location and then write to the IO port + // + // Safety: The linear address is decoded from the instruction and checked. It can be + // remapped to a memory object with the read permission successfully, and the remapped + // memory size matches the operand size of the instruction. + let data = unsafe { + match self.opsize { + Bytes::One => mctx + .map_linear_addr::(linear_addr, io_read, false)? + .mem_read()? as u64, + Bytes::Two => mctx + .map_linear_addr::(linear_addr, io_read, false)? + .mem_read()? as u64, + Bytes::Four => mctx + .map_linear_addr::(linear_addr, io_read, false)? + .mem_read()? 
as u64, + _ => return Err(InsnError::IoIoOut), + } + }; + mctx.ioio_out(port, self.opsize, data)?; + } + + let rflags = RFlags::from_bits_truncate(mctx.read_flags()); + if rflags.contains(RFlags::DF) { + // The DF flag is 1, the (E)SI/DI register is decremented. + write_reg( + mctx, + reg, + read_reg(mctx, reg, self.addrsize) + .checked_sub(self.opsize as usize) + .ok_or(InsnError::IoIoOut)?, + self.addrsize, + ); + } else { + // The DF flag is 0, the (E)SI/DI register is incremented. + write_reg( + mctx, + reg, + read_reg(mctx, reg, self.addrsize) + .checked_add(self.opsize as usize) + .ok_or(InsnError::IoIoOut)?, + self.addrsize, + ); + } + + if self.repeat != 0 { + // Update the count register with the left count which are not + // emulated yet. + write_reg(mctx, Register::Rcx, self.repeat - 1, self.addrsize); + } + + Ok(()) + } + + fn emulate_in_out( + &self, + port: Operand, + opsize: Bytes, + mctx: &mut I, + io_read: bool, + ) -> Result<(), InsnError> { + let port = match port { + Operand::Reg(Register::Rdx) => mctx.read_reg(Register::Rdx) as u16, + Operand::Reg(..) => unreachable!("Port value is always in DX"), + Operand::Imm(imm) => match imm { + Immediate::U8(val) => val as u16, + _ => unreachable!("Port value in immediate is always 1 byte"), + }, + }; + + // Check the IO permission bit map + if !ioio_perm(mctx, port, opsize, io_read) { + return Err(InsnError::ExceptionGP(0)); + } + + if io_read { + // Read data from IO port and then write to AL/AX/EAX. + write_reg( + mctx, + Register::Rax, + mctx.ioio_in(port, opsize)? as usize, + opsize, + ); + } else { + // Read data from AL/AX/EAX and then write to the IO port. 
+ mctx.ioio_out(port, opsize, read_reg(mctx, Register::Rax, opsize) as u64)?; + } + + Ok(()) + } } diff --git a/kernel/src/insn_decode/insn.rs b/kernel/src/insn_decode/insn.rs index df5affa1d..99e603e0e 100644 --- a/kernel/src/insn_decode/insn.rs +++ b/kernel/src/insn_decode/insn.rs @@ -67,7 +67,9 @@ impl Operand { pub enum DecodedInsn { Cpuid, In(Operand, Bytes), + Ins, Out(Operand, Bytes), + Outs, Wrmsr, Rdmsr, Rdtsc, @@ -95,213 +97,548 @@ impl Instruction { } } -/// A dummy struct to implement InsnMachineCtx for testing purposes. #[cfg(any(test, fuzzing))] -#[derive(Copy, Clone, Debug)] -pub struct TestCtx; +pub mod test_utils { + extern crate alloc; -#[cfg(any(test, fuzzing))] -impl InsnMachineCtx for TestCtx { - fn read_efer(&self) -> u64 { - use crate::cpu::efer::EFERFlags; + use crate::cpu::control_regs::{CR0Flags, CR4Flags}; + use crate::cpu::efer::EFERFlags; + use crate::insn_decode::*; + use crate::types::Bytes; + use alloc::boxed::Box; + + pub const TEST_PORT: u16 = 0xE0; + + /// A dummy struct to implement InsnMachineCtx for testing purposes. 
+ #[allow(dead_code)] + #[derive(Copy, Clone, Debug)] + pub struct TestCtx { + pub efer: u64, + pub cr0: u64, + pub cr4: u64, + + pub rax: usize, + pub rdx: usize, + pub rcx: usize, + pub rbx: usize, + pub rsp: usize, + pub rbp: usize, + pub rdi: usize, + pub rsi: usize, + pub r8: usize, + pub r9: usize, + pub r10: usize, + pub r11: usize, + pub r12: usize, + pub r13: usize, + pub r14: usize, + pub r15: usize, + pub rip: usize, + pub flags: usize, - EFERFlags::LMA.bits() + pub ioport: u16, + pub iodata: u64, + } + + impl Default for TestCtx { + fn default() -> Self { + Self { + efer: EFERFlags::LMA.bits(), + cr0: CR0Flags::PE.bits(), + cr4: CR4Flags::LA57.bits(), + rax: 0, + rdx: 0, + rcx: 0, + rbx: 0, + rsp: 0, + rbp: 0, + rdi: 0, + rsi: 0, + r8: 0, + r9: 0, + r10: 0, + r11: 0, + r12: 0, + r13: 0, + r14: 0, + r15: 0, + rip: 0, + flags: 0, + ioport: TEST_PORT, + iodata: u64::MAX, + } + } } - fn read_seg(&self, seg: SegRegister) -> u64 { - match seg { - SegRegister::CS => 0x00af9a000000ffffu64, - _ => 0x00cf92000000ffffu64, + #[allow(dead_code)] + struct TestMem { + ptr: *mut T, + } + + impl InsnMachineCtx for TestCtx { + fn read_efer(&self) -> u64 { + self.efer + } + + fn read_seg(&self, seg: SegRegister) -> u64 { + match seg { + SegRegister::CS => 0x00af9a000000ffffu64, + _ => 0x00cf92000000ffffu64, + } + } + + fn read_cr0(&self) -> u64 { + self.cr0 + } + + fn read_cr4(&self) -> u64 { + self.cr4 + } + + fn read_reg(&self, reg: Register) -> usize { + match reg { + Register::Rax => self.rax, + Register::Rdx => self.rdx, + Register::Rcx => self.rcx, + Register::Rbx => self.rbx, + Register::Rsp => self.rsp, + Register::Rbp => self.rbp, + Register::Rdi => self.rdi, + Register::Rsi => self.rsi, + Register::R8 => self.r8, + Register::R9 => self.r9, + Register::R10 => self.r10, + Register::R11 => self.r11, + Register::R12 => self.r12, + Register::R13 => self.r13, + Register::R14 => self.r14, + Register::R15 => self.r15, + Register::Rip => self.rip, + } + } + + fn 
write_reg(&mut self, reg: Register, val: usize) { + match reg { + Register::Rax => self.rax = val, + Register::Rdx => self.rdx = val, + Register::Rcx => self.rcx = val, + Register::Rbx => self.rbx = val, + Register::Rsp => self.rsp = val, + Register::Rbp => self.rbp = val, + Register::Rdi => self.rdi = val, + Register::Rsi => self.rsi = val, + Register::R8 => self.r8 = val, + Register::R9 => self.r9 = val, + Register::R10 => self.r10 = val, + Register::R11 => self.r11 = val, + Register::R12 => self.r12 = val, + Register::R13 => self.r13 = val, + Register::R14 => self.r14 = val, + Register::R15 => self.r15 = val, + Register::Rip => self.rip = val, + } + } + + fn read_cpl(&self) -> usize { + 0 + } + + fn read_flags(&self) -> usize { + self.flags + } + + fn map_linear_addr( + &self, + la: usize, + _write: bool, + _fetch: bool, + ) -> Result>, InsnError> { + Ok(Box::new(TestMem { ptr: la as *mut T })) + } + + fn ioio_in(&self, _port: u16, size: Bytes) -> Result { + match size { + Bytes::One => Ok(self.iodata as u8 as u64), + Bytes::Two => Ok(self.iodata as u16 as u64), + Bytes::Four => Ok(self.iodata as u32 as u64), + _ => Err(InsnError::IoIoIn), + } + } + + fn ioio_out(&mut self, _port: u16, size: Bytes, data: u64) -> Result<(), InsnError> { + match size { + Bytes::One => self.iodata = data as u8 as u64, + Bytes::Two => self.iodata = data as u16 as u64, + Bytes::Four => self.iodata = data as u32 as u64, + _ => return Err(InsnError::IoIoOut), + } + + Ok(()) } } - fn read_cr0(&self) -> u64 { - use crate::cpu::control_regs::CR0Flags; + #[cfg(test)] + impl InsnMachineMem for TestMem { + type Item = T; + + unsafe fn mem_read(&self) -> Result { + Ok(*(self.ptr)) + } - CR0Flags::PE.bits() + unsafe fn mem_write(&mut self, data: Self::Item) -> Result<(), InsnError> { + *(self.ptr) = data; + Ok(()) + } } - fn read_cr4(&self) -> u64 { - use crate::cpu::control_regs::CR4Flags; + #[cfg(fuzzing)] + impl InsnMachineMem for TestMem { + type Item = T; + + unsafe fn mem_read(&self) -> 
Result { + Err(InsnError::MemRead) + } - CR4Flags::LA57.bits() + unsafe fn mem_write(&mut self, _data: Self::Item) -> Result<(), InsnError> { + Ok(()) + } } } #[cfg(test)] mod tests { + use super::test_utils::*; use super::*; + use crate::cpu::registers::RFlags; #[test] fn test_decode_inb() { + let mut testctx = TestCtx { + iodata: 0xab, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ - 0xE4, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0xE4, + TEST_PORT as u8, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, 0x41, ]; - let decoded = Instruction::new(raw_insn).decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), - DecodedInsn::In(Operand::Imm(Immediate::U8(0x41)), Bytes::One) + DecodedInsn::In(Operand::Imm(Immediate::U8(TEST_PORT as u8)), Bytes::One) ); assert_eq!(decoded.size(), 2); + assert_eq!(testctx.rax as u64, testctx.iodata); + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + iodata: 0xab, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ 0xEC, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, ]; - let decoded = Instruction::new(raw_insn).decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), DecodedInsn::In(Operand::rdx(), Bytes::One) ); assert_eq!(decoded.size(), 1); + assert_eq!(testctx.rax as u64, testctx.iodata); } #[test] fn test_decode_inw() { + let mut testctx = TestCtx { + iodata: 0xabcd, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ - 0x66, 0xE5, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x66, + 0xE5, + TEST_PORT as u8, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, 
+ 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), - DecodedInsn::In(Operand::Imm(Immediate::U8(0x41)), Bytes::Two) + DecodedInsn::In(Operand::Imm(Immediate::U8(TEST_PORT as u8)), Bytes::Two) ); assert_eq!(decoded.size(), 3); + assert_eq!(testctx.rax as u64, testctx.iodata); + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + iodata: 0xabcd, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ 0x66, 0xED, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), DecodedInsn::In(Operand::rdx(), Bytes::Two) ); assert_eq!(decoded.size(), 2); + assert_eq!(testctx.rax as u64, testctx.iodata); } #[test] fn test_decode_inl() { + let mut testctx = TestCtx { + iodata: 0xabcdef01, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ - 0xE5, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0xE5, + TEST_PORT as u8, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), - DecodedInsn::In(Operand::Imm(Immediate::U8(0x41)), Bytes::Four) + DecodedInsn::In(Operand::Imm(Immediate::U8(TEST_PORT as u8)), Bytes::Four) ); assert_eq!(decoded.size(), 2); + assert_eq!(testctx.rax as u64, testctx.iodata); + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + iodata: 0xabcdef01, 
+ ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ 0xED, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), DecodedInsn::In(Operand::rdx(), Bytes::Four) ); assert_eq!(decoded.size(), 1); + assert_eq!(testctx.rax as u64, testctx.iodata); } #[test] fn test_decode_outb() { + let mut testctx = TestCtx { + rax: 0xab, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ - 0xE6, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0xE6, + TEST_PORT as u8, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), - DecodedInsn::Out(Operand::Imm(Immediate::U8(0x41)), Bytes::One) + DecodedInsn::Out(Operand::Imm(Immediate::U8(TEST_PORT as u8)), Bytes::One) ); assert_eq!(decoded.size(), 2); + assert_eq!(testctx.rax as u64, testctx.iodata); + let mut testctx = TestCtx { + rax: 0xab, + rdx: TEST_PORT as usize, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ 0xEE, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), DecodedInsn::Out(Operand::rdx(), Bytes::One) ); assert_eq!(decoded.size(), 1); + assert_eq!(testctx.rax as u64, testctx.iodata); } #[test] fn test_decode_outw() { + let mut 
testctx = TestCtx { + rax: 0xabcd, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ - 0x66, 0xE7, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x66, + 0xE7, + TEST_PORT as u8, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), - DecodedInsn::Out(Operand::Imm(Immediate::U8(0x41)), Bytes::Two) + DecodedInsn::Out(Operand::Imm(Immediate::U8(TEST_PORT as u8)), Bytes::Two) ); assert_eq!(decoded.size(), 3); + assert_eq!(testctx.rax as u64, testctx.iodata); + let mut testctx = TestCtx { + rax: 0xabcd, + rdx: TEST_PORT as usize, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ 0x66, 0xEF, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), DecodedInsn::Out(Operand::rdx(), Bytes::Two) ); assert_eq!(decoded.size(), 2); + assert_eq!(testctx.rax as u64, testctx.iodata); } #[test] fn test_decode_outl() { + let mut testctx = TestCtx { + rax: 0xabcdef01, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ - 0xE7, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0xE7, + TEST_PORT as u8, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, + 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), - 
DecodedInsn::Out(Operand::Imm(Immediate::U8(0x41)), Bytes::Four) + DecodedInsn::Out(Operand::Imm(Immediate::U8(TEST_PORT as u8)), Bytes::Four) ); assert_eq!(decoded.size(), 2); + assert_eq!(testctx.rax as u64, testctx.iodata); + let mut testctx = TestCtx { + rax: 0xabcdef01, + rdx: TEST_PORT as usize, + ..Default::default() + }; let raw_insn: [u8; MAX_INSN_SIZE] = [ 0xEF, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, ]; - let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + assert_eq!( decoded.insn().unwrap(), DecodedInsn::Out(Operand::rdx(), Bytes::Four) ); assert_eq!(decoded.size(), 1); + assert_eq!(testctx.rax as u64, testctx.iodata); } #[test] @@ -312,7 +649,7 @@ mod tests { ]; let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = insn.decode(&TestCtx::default()).unwrap(); assert_eq!(decoded.insn().unwrap(), DecodedInsn::Cpuid); assert_eq!(decoded.size(), 2); } @@ -325,7 +662,7 @@ mod tests { ]; let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = insn.decode(&TestCtx::default()).unwrap(); assert_eq!(decoded.insn().unwrap(), DecodedInsn::Wrmsr); assert_eq!(decoded.size(), 2); } @@ -338,7 +675,7 @@ mod tests { ]; let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = insn.decode(&TestCtx::default()).unwrap(); assert_eq!(decoded.insn().unwrap(), DecodedInsn::Rdmsr); assert_eq!(decoded.size(), 2); } @@ -351,7 +688,7 @@ mod tests { ]; let insn = Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = insn.decode(&TestCtx::default()).unwrap(); assert_eq!(decoded.insn().unwrap(), DecodedInsn::Rdtsc); assert_eq!(decoded.size(), 2); } @@ -364,11 +701,444 @@ mod tests { ]; let insn = 
Instruction::new(raw_insn); - let decoded = insn.decode(&TestCtx).unwrap(); + let decoded = insn.decode(&TestCtx::default()).unwrap(); assert_eq!(decoded.insn().unwrap(), DecodedInsn::Rdtscp); assert_eq!(decoded.size(), 3); } + #[test] + fn test_decode_ins_u8() { + let raw_insn: [u8; MAX_INSN_SIZE] = [ + 0xF3, 0x6C, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x41, + ]; + let iodata: [u8; 4] = [0x12, 0x34, 0x56, 0x78]; + + let mut i = 0usize; + let mut testdata: [u8; 4] = [0; 4]; + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rdi: core::ptr::addr_of!(testdata[0]) as usize, + ..Default::default() + }; + loop { + testctx.iodata = *iodata.get(i).unwrap() as u64; + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + if decoded.size() == 0 { + i += 1; + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Ins); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize + testdata.len() * Bytes::One as usize, + testctx.rdi + ); + assert_eq!(i, testdata.len() - 1); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + + i = iodata.len() - 1; + testdata = [0; 4]; + testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rdi: core::ptr::addr_of!(testdata[testdata.len() - 1]) as usize, + flags: RFlags::DF.bits(), + ..Default::default() + }; + loop { + testctx.iodata = *iodata.get(i).unwrap() as u64; + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + if decoded.size() == 0 { + i = i.checked_sub(1).unwrap(); + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Ins); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata[0]) as usize - Bytes::One as usize, + testctx.rdi + ); + 
assert_eq!(i, 0); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + } + + #[test] + fn test_decode_ins_u16() { + let raw_insn: [u8; MAX_INSN_SIZE] = [ + 0x66, 0xF3, 0x6D, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x41, + ]; + let iodata: [u16; 4] = [0x1234, 0x5678, 0x9abc, 0xdef0]; + + let mut i = 0usize; + let mut testdata: [u16; 4] = [0; 4]; + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rdi: core::ptr::addr_of!(testdata[0]) as usize, + ..Default::default() + }; + loop { + testctx.iodata = *iodata.get(i).unwrap() as u64; + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + if decoded.size() == 0 { + i += 1; + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Ins); + assert_eq!(decoded.size(), 3); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize + testdata.len() * Bytes::Two as usize, + testctx.rdi + ); + assert_eq!(i, testdata.len() - 1); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + + i = iodata.len() - 1; + testdata = [0; 4]; + testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rdi: core::ptr::addr_of!(testdata[testdata.len() - 1]) as usize, + flags: RFlags::DF.bits(), + ..Default::default() + }; + loop { + testctx.iodata = *iodata.get(i).unwrap() as u64; + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + if decoded.size() == 0 { + i = i.checked_sub(1).unwrap(); + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Ins); + assert_eq!(decoded.size(), 3); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize - Bytes::Two as usize, + testctx.rdi + ); + assert_eq!(i, 0); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, 
iodata.get(i).unwrap()); + } + break; + } + } + + #[test] + fn test_decode_ins_u32() { + let raw_insn: [u8; MAX_INSN_SIZE] = [ + 0xF3, 0x6D, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x41, + ]; + let iodata: [u32; 4] = [0x12345678, 0x9abcdef0, 0x87654321, 0x0fedcba9]; + + let mut i = 0usize; + let mut testdata: [u32; 4] = [0; 4]; + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rdi: core::ptr::addr_of!(testdata[0]) as usize, + ..Default::default() + }; + loop { + testctx.iodata = *iodata.get(i).unwrap() as u64; + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + if decoded.size() == 0 { + i += 1; + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Ins); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize + testdata.len() * Bytes::Four as usize, + testctx.rdi + ); + assert_eq!(i, testdata.len() - 1); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + + i = iodata.len() - 1; + testdata = [0; 4]; + testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rdi: core::ptr::addr_of!(testdata[testdata.len() - 1]) as usize, + flags: RFlags::DF.bits(), + ..Default::default() + }; + loop { + testctx.iodata = *iodata.get(i).unwrap() as u64; + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + if decoded.size() == 0 { + i = i.checked_sub(1).unwrap(); + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Ins); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize - Bytes::Four as usize, + testctx.rdi + ); + assert_eq!(i, 0); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + } + + #[test] + fn 
test_decode_outs_u8() { + let raw_insn: [u8; MAX_INSN_SIZE] = [ + 0xF3, 0x6E, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x41, + ]; + let testdata: [u8; 4] = [0x12, 0x34, 0x56, 0x78]; + + let mut i = 0usize; + let mut iodata: [u8; 4] = [0; 4]; + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rsi: core::ptr::addr_of!(testdata[0]) as usize, + ..Default::default() + }; + loop { + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + *iodata.get_mut(i).unwrap() = testctx.iodata as u8; + if decoded.size() == 0 { + i += 1; + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Outs); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize + testdata.len(), + testctx.rsi + ); + assert_eq!(i, testdata.len() - 1); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + + i = iodata.len() - 1; + iodata = [0; 4]; + testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rsi: core::ptr::addr_of!(testdata[testdata.len() - 1]) as usize, + flags: RFlags::DF.bits(), + ..Default::default() + }; + loop { + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + *iodata.get_mut(i).unwrap() = testctx.iodata as u8; + if decoded.size() == 0 { + i = i.checked_sub(1).unwrap(); + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Outs); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata[0]) as usize - Bytes::One as usize, + testctx.rsi + ); + assert_eq!(i, 0); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + } + + #[test] + fn test_decode_outs_u16() { + let raw_insn: [u8; MAX_INSN_SIZE] = [ + 0x66, 0xF3, 0x6F, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 
0x41, 0x41, 0x41, 0x41, 0x41, + 0x41, + ]; + let testdata: [u16; 4] = [0x1234, 0x5678, 0x9abc, 0xdef0]; + + let mut i = 0usize; + let mut iodata: [u16; 4] = [0; 4]; + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rsi: core::ptr::addr_of!(testdata[0]) as usize, + ..Default::default() + }; + loop { + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + *iodata.get_mut(i).unwrap() = testctx.iodata as u16; + if decoded.size() == 0 { + i += 1; + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Outs); + assert_eq!(decoded.size(), 3); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize + testdata.len() * Bytes::Two as usize, + testctx.rsi + ); + assert_eq!(i, testdata.len() - 1); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + + i = iodata.len() - 1; + iodata = [0; 4]; + testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rsi: core::ptr::addr_of!(testdata[testdata.len() - 1]) as usize, + flags: RFlags::DF.bits(), + ..Default::default() + }; + loop { + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + *iodata.get_mut(i).unwrap() = testctx.iodata as u16; + if decoded.size() == 0 { + i = i.checked_sub(1).unwrap(); + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Outs); + assert_eq!(decoded.size(), 3); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata[0]) as usize - Bytes::Two as usize, + testctx.rsi + ); + assert_eq!(i, 0); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + } + + #[test] + fn test_decode_outs_u32() { + let raw_insn: [u8; MAX_INSN_SIZE] = [ + 0xF3, 0x6F, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, + 0x41, + ]; + let testdata: [u32; 4] = [0x12345678, 
0x9abcdef0, 0xdeadbeef, 0xfeedface]; + + let mut i = 0usize; + let mut iodata: [u32; 4] = [0; 4]; + let mut testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rsi: core::ptr::addr_of!(testdata) as usize, + ..Default::default() + }; + loop { + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + *iodata.get_mut(i).unwrap() = testctx.iodata as u32; + if decoded.size() == 0 { + i += 1; + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Outs); + assert_eq!(decoded.size(), 2); + assert_eq!(*testdata.last().unwrap() as u64, testctx.iodata); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata) as usize + testdata.len() * Bytes::Four as usize, + testctx.rsi + ); + assert_eq!(i, testdata.len() - 1); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + + i = iodata.len() - 1; + iodata = [0; 4]; + testctx = TestCtx { + rdx: TEST_PORT as usize, + rcx: testdata.len(), + rsi: core::ptr::addr_of!(testdata[testdata.len() - 1]) as usize, + flags: RFlags::DF.bits(), + ..Default::default() + }; + loop { + let decoded = Instruction::new(raw_insn).decode(&testctx).unwrap(); + decoded.emulate(&mut testctx).unwrap(); + *iodata.get_mut(i).unwrap() = testctx.iodata as u32; + if decoded.size() == 0 { + i = i.checked_sub(1).unwrap(); + continue; + } + + assert_eq!(decoded.insn().unwrap(), DecodedInsn::Outs); + assert_eq!(decoded.size(), 2); + assert_eq!(0, testctx.rcx); + assert_eq!( + core::ptr::addr_of!(testdata[0]) as usize - Bytes::Four as usize, + testctx.rsi + ); + assert_eq!(i, 0); + for (i, d) in testdata.iter().enumerate() { + assert_eq!(d, iodata.get(i).unwrap()); + } + break; + } + } + #[test] fn test_decode_failed() { let raw_insn: [u8; MAX_INSN_SIZE] = [ @@ -377,7 +1147,7 @@ mod tests { ]; let insn = Instruction::new(raw_insn); - let err = insn.decode(&TestCtx); + let err = insn.decode(&TestCtx::default()); 
assert!(err.is_err()); } diff --git a/kernel/src/insn_decode/mod.rs b/kernel/src/insn_decode/mod.rs index ecb29f134..06b37bc23 100644 --- a/kernel/src/insn_decode/mod.rs +++ b/kernel/src/insn_decode/mod.rs @@ -8,9 +8,9 @@ mod decode; mod insn; mod opcode; -pub use decode::{DecodedInsnCtx, InsnMachineCtx}; +pub use decode::{DecodedInsnCtx, InsnMachineCtx, InsnMachineMem}; #[cfg(any(test, fuzzing))] -pub use insn::TestCtx; +pub use insn::test_utils::TestCtx; pub use insn::{ DecodedInsn, Immediate, Instruction, Operand, Register, SegRegister, MAX_INSN_SIZE, }; @@ -32,12 +32,28 @@ pub enum InsnError { DecodePrefix, /// Error while decoding the SIB byte. DecodeSib, + /// Error due to alignment check exception. + ExceptionAC, + /// Error due to general protection exception. + ExceptionGP(u8), + /// Error due to stack segment exception. + ExceptionSS, + /// Error while mapping linear addresses. + MapLinearAddr, + /// Error while reading from memory. + MemRead, + /// Error while writing to memory. + MemWrite, /// No OpCodeDesc generated while decoding. NoOpCodeDesc, /// Error while peeking an instruction byte. InsnPeek, /// Invalid RegCode for decoding Register. InvalidRegister, + /// Error while handling input IO operation. + IoIoIn, + /// Error while handling output IO operation. + IoIoOut, /// The decoded instruction is not supported. 
UnSupportedInsn, } diff --git a/kernel/src/insn_decode/opcode.rs b/kernel/src/insn_decode/opcode.rs index 92dc900bf..5f5b7bd4d 100644 --- a/kernel/src/insn_decode/opcode.rs +++ b/kernel/src/insn_decode/opcode.rs @@ -41,7 +41,9 @@ pub enum OpCodeClass { Group7, Group7Rm7, In, + Ins, Out, + Outs, Rdmsr, Rdtsc, Rdtscp, @@ -90,6 +92,18 @@ static ONE_BYTE_TABLE: [Option; 256] = { let mut table: [Option; 256] = [None; 256]; table[0x0F] = opcode!(OpCodeClass::TwoByte); + table[0x6C] = opcode!( + 0x6C, + OpCodeClass::Ins, + OpCodeFlags::BYTE_OP.bits() | OpCodeFlags::NO_MODRM.bits() + ); + table[0x6D] = opcode!(0x6D, OpCodeClass::Ins, OpCodeFlags::NO_MODRM.bits()); + table[0x6E] = opcode!( + 0x6E, + OpCodeClass::Outs, + OpCodeFlags::BYTE_OP.bits() | OpCodeFlags::NO_MODRM.bits() + ); + table[0x6F] = opcode!(0x6F, OpCodeClass::Outs, OpCodeFlags::NO_MODRM.bits()); table[0xE4] = opcode!( 0xE4, OpCodeClass::In, diff --git a/kernel/src/io.rs b/kernel/src/io.rs index 3167e7509..39c8a920c 100644 --- a/kernel/src/io.rs +++ b/kernel/src/io.rs @@ -31,6 +31,18 @@ pub trait IOPort: Sync + Debug { ret } } + + fn outl(&self, port: u16, value: u32) { + unsafe { asm!("outl %eax, %dx", in("eax") value, in("dx") port, options(att_syntax)) } + } + + fn inl(&self, port: u16) -> u32 { + unsafe { + let ret: u32; + asm!("inl %dx, %eax", in("dx") port, out("eax") ret, options(att_syntax)); + ret + } + } } #[derive(Default, Debug, Clone, Copy)] diff --git a/kernel/src/mm/guestmem.rs b/kernel/src/mm/guestmem.rs index 643585823..a1614b485 100644 --- a/kernel/src/mm/guestmem.rs +++ b/kernel/src/mm/guestmem.rs @@ -6,7 +6,7 @@ use crate::address::{Address, VirtAddr}; use crate::error::SvsmError; - +use crate::insn_decode::{InsnError, InsnMachineMem}; use core::arch::asm; use core::mem::{size_of, MaybeUninit}; @@ -257,6 +257,20 @@ impl GuestPtr { } } +impl InsnMachineMem for GuestPtr { + type Item = T; + + /// Safety: See the GuestPtr's read() method documentation for safety requirements. 
+ unsafe fn mem_read(&self) -> Result { + self.read().map_err(|_| InsnError::MemRead) + } + + /// Safety: See the GuestPtr's write() method documentation for safety requirements. + unsafe fn mem_write(&mut self, data: Self::Item) -> Result<(), InsnError> { + self.write(data).map_err(|_| InsnError::MemWrite) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs index 67c9bc380..3f6e40252 100644 --- a/kernel/src/mm/pagetable.rs +++ b/kernel/src/mm/pagetable.rs @@ -5,8 +5,11 @@ // Author: Joerg Roedel use crate::address::{Address, PhysAddr, VirtAddr}; -use crate::cpu::control_regs::write_cr3; +use crate::cpu::control_regs::{write_cr3, CR0Flags, CR4Flags}; +use crate::cpu::efer::EFERFlags; use crate::cpu::flush_tlb_global_sync; +use crate::cpu::idt::common::PageFaultError; +use crate::cpu::registers::RFlags; use crate::error::SvsmError; use crate::mm::PageBox; use crate::mm::{phys_to_virt, virt_to_phys, PGTABLE_LVL3_IDX_SHARED}; @@ -14,6 +17,7 @@ use crate::platform::SvsmPlatform; use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M}; use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult}; use crate::utils::MemoryRegion; +use crate::BIT_MASK; use bitflags::bitflags; use core::cmp; use core::ops::{Index, IndexMut}; @@ -34,6 +38,9 @@ static SHARED_PTE_MASK: ImmutAfterInitCell = ImmutAfterInitCell::new(0); /// Maximum physical address supported by the system. static MAX_PHYS_ADDR: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); +/// Maximum physical address bits supported by the system. +static PHYS_ADDR_SIZE: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); + /// Physical address for the Launch VMSA (Virtual Machine Saving Area). 
pub const LAUNCH_VMSA_ADDR: PhysAddr = PhysAddr::new(0xFFFFFFFFF000); @@ -77,6 +84,8 @@ fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { guest_phys_addr_size }; + PHYS_ADDR_SIZE.reinit(&phys_addr_size)?; + // If the C-bit is a physical address bit however, the guest physical // address space is effectively reduced by 1 bit. // - APM2, 15.34.6 Page Table Support @@ -160,6 +169,43 @@ impl PTEntryFlags { } } +/// Represents paging mode. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum PagingMode { + // Paging mode is disabled + NoPaging, + // 32bit legacy paging mode + NonPAE, + // 32bit PAE paging mode + PAE, + // 4 level paging mode + PML4, + // 5 level paging mode + PML5, +} + +impl PagingMode { + pub fn new(efer: EFERFlags, cr0: CR0Flags, cr4: CR4Flags) -> Self { + if !cr0.contains(CR0Flags::PG) { + // Paging is disabled + PagingMode::NoPaging + } else if efer.contains(EFERFlags::LMA) { + // Long mode is activated + if cr4.contains(CR4Flags::LA57) { + PagingMode::PML5 + } else { + PagingMode::PML4 + } + } else if cr4.contains(CR4Flags::PAE) { + // PAE mode + PagingMode::PAE + } else { + // Non PAE mode + PagingMode::NonPAE + } + } +} + /// Represents a page table entry. #[repr(C)] #[derive(Copy, Clone, Debug, Default)] @@ -181,6 +227,118 @@ impl PTEntry { self.flags().contains(PTEntryFlags::PRESENT) } + /// Check if the page table entry is huge. + pub fn huge(&self) -> bool { + self.flags().contains(PTEntryFlags::HUGE) + } + + /// Check if the page table entry is writable. + pub fn writable(&self) -> bool { + self.flags().contains(PTEntryFlags::WRITABLE) + } + + /// Check if the page table entry is NX (no-execute). + pub fn nx(&self) -> bool { + self.flags().contains(PTEntryFlags::NX) + } + + /// Check if the page table entry is user-accessible. + pub fn user(&self) -> bool { + self.flags().contains(PTEntryFlags::USER) + } + + /// Check if the page table entry has reserved bits set. 
+ pub fn has_reserved_bits(&self, pm: PagingMode, level: usize) -> bool { + let reserved_mask = match pm { + PagingMode::NoPaging => unreachable!("NoPaging does not have page table"), + PagingMode::NonPAE => { + match level { + // No reserved bits in 4k PTE. + 0 => 0, + 1 => { + if self.huge() { + // Bit21 is reserved in 4M PDE. + BIT_MASK!(21, 21) + } else { + // No reserved bits in PDE. + 0 + } + } + _ => unreachable!("Invalid NonPAE page table level"), + } + } + PagingMode::PAE => { + // Bit62 ~ MAXPHYSADDR are reserved for each + // level in PAE page table. + BIT_MASK!(62, *PHYS_ADDR_SIZE) + | match level { + // No additional reserved bits in 4k PTE. + 0 => 0, + 1 => { + if self.huge() { + // Bit20 ~ Bit13 are reserved in 2M PDE. + BIT_MASK!(20, 13) + } else { + // No additional reserved bits in PDE. + 0 + } + } + // Bit63 and Bit8 ~ Bit5 are reserved in PDPTE. + 2 => BIT_MASK!(63, 63) | BIT_MASK!(8, 5), + _ => unreachable!("Invalid PAE page table level"), + } + } + PagingMode::PML4 | PagingMode::PML5 => { + // Bit51 ~ MAXPHYSADDR are reserved for each level + // in PML4 and PML5 page table. + let common = if *PHYS_ADDR_SIZE > 51 { + 0 + } else { + // Remove the encryption mask bit as this bit is not reserved + BIT_MASK!(51, *PHYS_ADDR_SIZE) + & !((shared_pte_mask() | private_pte_mask()) as u64) + }; + + common + | match level { + // No additional reserved bits in 4k PTE. + 0 => 0, + 1 => { + if self.huge() { + // Bit20 ~ Bit13 are reserved in 2M PDE. + BIT_MASK!(20, 13) + } else { + // No additional reserved bits in PDE. + 0 + } + } + 2 => { + if self.huge() { + // Bit29 ~ Bit13 are reserved in 1G PDPTE. + BIT_MASK!(29, 13) + } else { + // No additional reserved bits in PDPTE. + 0 + } + } + // Bit8 ~ Bit7 are reserved in PML4E. + 3 => BIT_MASK!(8, 7), + 4 => { + if pm == PagingMode::PML4 { + unreachable!("Invalid PML4 page table level"); + } else { + // Bit8 ~ Bit7 are reserved in PML5E. 
+ BIT_MASK!(8, 7) + } + } + _ => unreachable!("Invalid PML4/PML5 page table level"), + } + } + }; + + self.raw() & reserved_mask != 0 + } + /// Get the raw bits (`u64`) of the page table entry. pub fn raw(&self) -> u64 { self.0.bits() as u64 @@ -1277,3 +1435,228 @@ impl PageTablePart { self.get_mut().and_then(|r| r.unmap_2m(vaddr)) } } + +bitflags! { + /// Flags to represent how memory is accessed, e.g. write data to the + /// memory or fetch code from the memory. + #[derive(Clone, Copy, Debug)] + pub struct MemAccessMode: u32 { + const WRITE = 1 << 0; + const FETCH = 1 << 1; + } +} + +/// Attributes to determine whether a memory access (write/fetch) is permitted +/// by a translation which includes the paging-mode modifiers in CR0, CR4 and +/// EFER; EFLAGS.AC; and the supervisor/user mode access. +#[derive(Clone, Copy, Debug)] +pub struct PTWalkAttr { + cr0: CR0Flags, + cr4: CR4Flags, + efer: EFERFlags, + flags: RFlags, + user_mode_access: bool, + pm: PagingMode, +} + +impl PTWalkAttr { + /// Creates a new `PTWalkAttr` instance with the specified attributes. + /// + /// # Arguments + /// + /// * `cr0`, `cr4`, and `efer` - Represent the control register + /// flags for CR0, CR4, and EFER respectively. + /// * `flags` - Represents the CPU Flags. + /// * `user_mode_access` - Indicates whether the access is in user mode. + /// + /// Returns a new `PTWalkAttr` instance. + pub fn new( + cr0: CR0Flags, + cr4: CR4Flags, + efer: EFERFlags, + flags: RFlags, + user_mode_access: bool, + ) -> Self { + Self { + cr0, + cr4, + efer, + flags, + user_mode_access, + pm: PagingMode::new(efer, cr0, cr4), + } + } + + /// Checks the access rights for a page table entry. + /// + /// # Arguments + /// + /// * `entry` - The page table entry to check. + /// * `mem_am` - Indicates how to access the memory. + /// * `level` - The page table level of the entry; level 0 is the last level
+ /// * `pteflags` - The PTE flags to indicate if the corresponding page + /// table entry allows the access rights. + /// + /// # Returns + /// + /// Returns `Ok((entry, leaf))` if the access rights are valid, where + /// `entry` is the modified page table entry and `leaf` is a boolean + /// indicating whether the entry is a leaf node, or `Err(PageFaultError)` + /// to indicate the page fault error code if the access rights are invalid. + pub fn check_access_rights( + &self, + entry: PTEntry, + mem_am: MemAccessMode, + level: usize, + pteflags: &mut PTEntryFlags, + ) -> Result<(PTEntry, bool), PageFaultError> { + let pf_err = self.default_pf_err(mem_am) | PageFaultError::P; + + if !entry.present() { + // Entry is not present. + return Err(pf_err & !PageFaultError::P); + } + + if entry.has_reserved_bits(self.pm, level) { + // Reserved bits have been set. + return Err(pf_err | PageFaultError::R); + } + + // SDM 4.6.1 Determination of Access Rights: + // If the U/S flag (bit 2) is 0 in at least one of the + // paging-structure entries, the address is a supervisor-mode + // address. Otherwise, the address is a user-mode address. + // So by-default assume the address is user mode address. + if !entry.user() { + *pteflags &= !PTEntryFlags::USER; + } + + // SDM 4.6.1 Determination of Access Rights: + // R/W flag (bit 1) is 1 in every paging-structure entry controlling + // the translation and with a protection key for which write access is + // permitted; data may not be written to any supervisor-mode + // address with a translation for which the R/W flag is 0 in any + // paging-structure entry controlling the translation. 
+ // The same for user mode address + if !entry.writable() { + *pteflags &= !PTEntryFlags::WRITABLE; + } + + // SDM 4.6.1 Determination of Access Rights: + // For non 32-bit paging modes with IA32_EFER.NXE = 1, instructions + // may be fetched from any supervisormode address with a translation + // for which the XD flag (bit 63) is 0 in every paging-structure entry + // controlling the translation; instructions may not be fetched from + // any supervisor-mode address with a translation for which the XD flag + // is 1 in any paging-structure entry controlling the translation + if self.efer.contains(EFERFlags::NXE) && entry.nx() { + *pteflags |= PTEntryFlags::NX; + } else if !self.efer.contains(EFERFlags::NXE) && entry.nx() { + // XD bit must be 0 if efer.NXE = 0 + return Err(pf_err | PageFaultError::R); + } + + let leaf = if level == 0 || entry.huge() { + // User mode cannot access any supervisor mode addresses + if self.user_mode_access && !pteflags.contains(PTEntryFlags::USER) { + return Err(pf_err); + } + + // Always check for reading. For the case of supervisor mode read user + // mode addresses, do special checking. For other cases, read is allowed. 
+ if !self.user_mode_access && pteflags.contains(PTEntryFlags::USER) { + // Read not allowed with SMAP = 1 && flags.ac = 0 + if self.cr4.contains(CR4Flags::SMAP) && !self.flags.contains(RFlags::AC) { + return Err(pf_err); + } + } + + if mem_am.contains(MemAccessMode::WRITE) { + if !self.user_mode_access && pteflags.contains(PTEntryFlags::USER) { + // Check supervisor mode write user mode addresses + if !self.cr0.contains(CR0Flags::WP) { + // Check write with CR0.WP = 0 + if self.cr4.contains(CR4Flags::SMAP) && !self.flags.contains(RFlags::AC) { + // Write not allowed with SMAP = 1 && flags.ac = 0 + return Err(pf_err); + } + } else { + // Check write with CR0.WP = 1 + if !self.cr4.contains(CR4Flags::SMAP) { + // SMAP = 0 + if !pteflags.contains(PTEntryFlags::WRITABLE) { + // Write not allowed R/W = 0 + return Err(pf_err); + } + } else { + // SMAP = 1 + if !self.flags.contains(RFlags::AC) + || !pteflags.contains(PTEntryFlags::WRITABLE) + { + // Write not allowed with flags.AC = 0 || R/W = 0 + return Err(pf_err); + } + } + } + } else if !self.user_mode_access && !pteflags.contains(PTEntryFlags::USER) { + // Check supervisor mode write supervisor mode addresses + if self.cr0.contains(CR0Flags::WP) && !pteflags.contains(PTEntryFlags::WRITABLE) + { + // Write not allowed with CR0.WP = 1 && R/W = 0 + return Err(pf_err); + } + } else if self.user_mode_access && pteflags.contains(PTEntryFlags::USER) { + // Check user mode write user mode addresses + if !pteflags.contains(PTEntryFlags::WRITABLE) { + // Write not allowed R/W = 0 + return Err(pf_err); + } + } + // User mode write supervisor mode addresses is checked already + } + + if mem_am.contains(MemAccessMode::FETCH) { + // For instruction fetch, the rule is the same except for the case of + // supervisor mode fetch user mode addresses + if !self.user_mode_access && pteflags.contains(PTEntryFlags::USER) { + // Fetch not allowed with SMEP = 1 + if self.cr4.contains(CR4Flags::SMEP) { + return Err(pf_err); + } + } + + // For 
non-32bit paging mode, fetch not allowed with efer.NXE = 1 && XD = 1 + if self.cr4.contains(CR4Flags::PAE) + && self.efer.contains(EFERFlags::NXE) + && pteflags.contains(PTEntryFlags::NX) + { + return Err(pf_err); + } + } + true + } else { + false + }; + + Ok((entry, leaf)) + } + + fn default_pf_err(&self, mem_am: MemAccessMode) -> PageFaultError { + let mut err = PageFaultError::empty(); + + if mem_am.contains(MemAccessMode::WRITE) { + err |= PageFaultError::W; + } + + if mem_am.contains(MemAccessMode::FETCH) { + err |= PageFaultError::I; + } + + if self.user_mode_access { + err |= PageFaultError::U; + } + + err + } +} diff --git a/kernel/src/mm/ptguards.rs b/kernel/src/mm/ptguards.rs index a6cd6c94a..da02f82ff 100644 --- a/kernel/src/mm/ptguards.rs +++ b/kernel/src/mm/ptguards.rs @@ -9,12 +9,13 @@ use crate::address::{Address, PhysAddr, VirtAddr}; use crate::cpu::percpu::this_cpu; use crate::cpu::tlb::flush_address_percpu; use crate::error::SvsmError; +use crate::insn_decode::{InsnError, InsnMachineMem}; use crate::mm::virtualrange::{ virt_alloc_range_2m, virt_alloc_range_4k, virt_free_range_2m, virt_free_range_4k, }; use crate::types::{PageSize, PAGE_SIZE, PAGE_SIZE_2M}; - use crate::utils::MemoryRegion; +use core::marker::PhantomData; /// Guard for a per-CPU page mapping to ensure adequate cleanup if drop. #[derive(Debug)] @@ -95,6 +96,49 @@ impl PerCPUPageMappingGuard { pub fn virt_addr(&self) -> VirtAddr { self.mapping.start() } + + /// Creates a virtual contigous mapping for the given 4k physical pages which + /// may not be contiguous in physical memory. + /// + /// # Arguments + /// + /// * `pages`: A slice of tuple containing `PhysAddr` objects representing the + /// 4k page to map and its shareability. + /// + /// # Returns + /// + /// This function returns a `Result` that contains a `PerCPUPageMappingGuard` + /// object on success. The `PerCPUPageMappingGuard` object represents the page + /// mapping that was created. 
If an error occurs while creating the page + /// mapping, it returns a `SvsmError`. + pub fn create_4k_pages(pages: &[(PhysAddr, bool)]) -> Result { + let region = virt_alloc_range_4k(pages.len() * PAGE_SIZE, 0)?; + let flags = PTEntryFlags::data(); + + let mut pgtable = this_cpu().get_pgtable(); + for (i, addr) in region.iter_pages(PageSize::Regular).enumerate() { + assert!(pages[i].0.is_aligned(PAGE_SIZE)); + + pgtable + .map_4k(addr, pages[i].0, flags) + .and_then(|_| { + if pages[i].1 { + pgtable.set_shared_4k(addr) + } else { + Ok(()) + } + }) + .map_err(|e| { + virt_free_range_4k(region); + e + })?; + } + + Ok(PerCPUPageMappingGuard { + mapping: region, + huge: false, + }) + } } impl Drop for PerCPUPageMappingGuard { @@ -115,3 +159,121 @@ impl Drop for PerCPUPageMappingGuard { } } } + +/// Represents a guard for a specific memory range mapping, which will +/// unmap the specific memory range after being dropped. +#[derive(Debug)] +pub struct MemMappingGuard { + // The guard of holding the temperary mapping for a specific memory range. + guard: PerCPUPageMappingGuard, + // The starting offset of the memory range. + start_off: usize, + + phantom: PhantomData, +} + +impl MemMappingGuard { + /// Creates a new `MemMappingGuard` with the given `PerCPUPageMappingGuard` + /// and starting offset. + /// + /// # Arguments + /// + /// * `guard` - The `PerCPUPageMappingGuard` to associate with the `MemMappingGuard`. + /// * `start_off` - The starting offset for the memory mapping. + /// + /// # Returns + /// + /// Self is returned. + pub fn new(guard: PerCPUPageMappingGuard, start_off: usize) -> Result { + if start_off >= guard.mapping.len() { + Err(SvsmError::Mem) + } else { + Ok(Self { + guard, + start_off, + phantom: PhantomData, + }) + } + } + + /// Reads data from a virtual address region specified by an offset + /// + /// # Safety + /// + /// The caller must verify not to read from arbitrary memory regions. 
The region to read must + /// be checked to guarantee the memory is mapped by the guard and is valid for reading. + /// + /// # Arguments + /// + /// * `offset`: The offset (in unit of `size_of::()`) from the start of the virtual address + /// region to read from. + /// + /// # Returns + /// + /// This function returns a `Result` that indicates the success or failure of the operation. + /// If the read operation is successful, it returns `Ok(T)` which contains the read back data. + /// If the virtual address region cannot be retrieved, it returns `Err(SvsmError::Mem)`. + pub unsafe fn read(&self, offset: usize) -> Result { + let size = core::mem::size_of::(); + self.virt_addr_region(offset * size, size) + .map_or(Err(SvsmError::Mem), |region| { + Ok(*(region.start().as_ptr::())) + }) + } + + /// Writes data from a provided data into a virtual address region specified by an offset. + /// + /// # Safety + /// + /// The caller must verify not to write to arbitrary memory regions. The memory region to write + /// must be checked to guarantee the memory is mapped by the guard and is valid for writing. + /// + /// # Arguments + /// + /// * `offset`: The offset (in unit of `size_of::()`) from the start of the virtual address + /// region to write to. + /// * `data`: Data to write. + /// + /// # Returns + /// + /// This function returns a `Result` that indicates the success or failure of the operation. + /// If the write operation is successful, it returns `Ok(())`. If the virtual address region + /// cannot be retrieved or if the buffer size is larger than the region size, it returns + /// `Err(SvsmError::Mem)`. 
+ pub unsafe fn write(&self, offset: usize, data: T) -> Result<(), SvsmError> { + let size = core::mem::size_of::(); + self.virt_addr_region(offset * size, size) + .map_or(Err(SvsmError::Mem), |region| { + *(region.start().as_mut_ptr::()) = data; + Ok(()) + }) + } + + fn virt_addr_region(&self, offset: usize, len: usize) -> Option> { + if len != 0 { + MemoryRegion::checked_new( + self.guard + .virt_addr() + .checked_add(self.start_off + offset)?, + len, + ) + .filter(|v| self.guard.mapping.contains_region(v)) + } else { + None + } + } +} + +impl InsnMachineMem for MemMappingGuard { + type Item = T; + + /// Safety: See the MemMappingGuard's read() method documentation for safety requirements. + unsafe fn mem_read(&self) -> Result { + self.read(0).map_err(|_| InsnError::MemRead) + } + + /// Safety: See the MemMappingGuard's write() method documentation for safety requirements. + unsafe fn mem_write(&mut self, data: Self::Item) -> Result<(), InsnError> { + self.write(0, data).map_err(|_| InsnError::MemWrite) + } +} diff --git a/kernel/src/svsm_console.rs b/kernel/src/svsm_console.rs index 88872fabd..5b828cf11 100644 --- a/kernel/src/svsm_console.rs +++ b/kernel/src/svsm_console.rs @@ -50,6 +50,21 @@ impl IOPort for SVSMIOPort { Err(_e) => request_termination_msr(), } } + + fn outl(&self, port: u16, value: u32) { + let ret = current_ghcb().ioio_out(port, GHCBIOSize::Size32, value as u64); + if ret.is_err() { + request_termination_msr(); + } + } + + fn inl(&self, port: u16) -> u32 { + let ret = current_ghcb().ioio_in(port, GHCBIOSize::Size32); + match ret { + Ok(v) => (v & 0xffffffff) as u32, + Err(_e) => request_termination_msr(), + } + } } #[derive(Clone, Copy, Debug, Default)] diff --git a/kernel/src/utils/util.rs b/kernel/src/utils/util.rs index 6e9d676fa..45aa5c827 100644 --- a/kernel/src/utils/util.rs +++ b/kernel/src/utils/util.rs @@ -70,6 +70,18 @@ macro_rules! BIT { }; } +/// Obtain bit mask for the given positions +#[macro_export] +macro_rules! 
/// Obtain a `u64` bit mask covering the inclusive bit range `$s..=$e`.
///
/// # Arguments
///
/// * `$e` - The (high) end bit position; must be <= 63.
/// * `$s` - The (low) start bit position; must be <= `$e`.
///
/// # Panics
///
/// Panics if either position exceeds 63 or if `$s > $e`.
#[macro_export]
macro_rules! BIT_MASK {
    ($e: expr, $s: expr) => {{
        assert!(
            $s <= 63 && $e <= 63 && $s <= $e,
            "Bit positions must be within 0..=63 and start must not exceed end"
        );
        // Shift u64::MAX right first so that a full-width mask (e = 63,
        // s = 0) never requires a shift by 64, which would overflow; the
        // previous `(1u64 << ($e - $s + 1)) - 1` form did exactly that.
        (u64::MAX >> (63 - ($e - $s))) << $s
    }};
}