From 333d9512b8ff9dd2ed59bb85fe9bf5d7f89d3e33 Mon Sep 17 00:00:00 2001
From: Jiaqi Gao
Date: Fri, 2 Aug 2024 02:43:14 -0400
Subject: [PATCH] td-payload: add new shared memory init function with private
 shadow

Add an extended init API that sets up the shared memory allocator
together with a private shadow start address, while keeping the
behavior of the original API unchanged. If no private shadow is
available, the method `copy_to_private_shadow` returns `None`.

Since `shadow_start` may be lower or higher than the start of the
shared memory, the private shadow address is now derived from the
offset of the allocated shared address relative to the start of the
shared allocator, instead of a fixed offset.
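For illustration, a minimal sketch of the extended flow as seen from a
payload. The HOB address and layout are placeholders taken from the
boot flow, module paths are abbreviated, and the `as_mut_bytes`
accessor is assumed from the existing `SharedMemory` API:

    // Opt in to the private shadow: pre_init() reserves a second
    // region of `shared_memory_size` bytes to serve as the shadow.
    pre_init(hob_addr, &layout, /* use_shared_shadow */ true);

    // Allocate one shared page, fill it, then snapshot it into
    // private memory so the contents cannot change while they are
    // being validated.
    let mut buf = SharedMemory::new(1).expect("out of shared memory");
    buf.as_mut_bytes().fill(0u8);

    // `Some(...)` when a shadow was configured, `None` otherwise.
    if let Some(snapshot) = buf.copy_to_private_shadow() {
        // Parse or verify `snapshot` from private memory here.
    }

    // Shadow placement mirrors the allocation's position: an address
    // at SHARED_START + off shadows to SHADOW_START + off, whichever
    // of the two regions sits at the lower address.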
Signed-off-by: Jiaqi Gao
---
 td-payload/src/arch/x86_64/init.rs | 18 ++++++++++++++----
 td-payload/src/mm/shared.rs        | 35 ++++++++++++++++++++---------------
 2 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/td-payload/src/arch/x86_64/init.rs b/td-payload/src/arch/x86_64/init.rs
index 46d7f455..43162733 100644
--- a/td-payload/src/arch/x86_64/init.rs
+++ b/td-payload/src/arch/x86_64/init.rs
@@ -7,8 +7,12 @@ use crate::{
     arch::{gdt, idt},
     hob::{self, get_hob},
     mm::{
-        get_usable, heap::init_heap, init_ram, layout::RuntimeLayout,
-        page_table::init_pt_frame_allocator, shared::init_shared_memory,
+        get_usable,
+        heap::init_heap,
+        init_ram,
+        layout::RuntimeLayout,
+        page_table::init_pt_frame_allocator,
+        shared::{init_shared_memory, init_shared_memory_with_shadow},
     },
 };
 
@@ -22,7 +26,7 @@ use super::{
     idt::{PAGE_FAULT_EXCEPTION, PAGE_FAULT_IST},
 };
 
-pub fn pre_init(hob: u64, layout: &RuntimeLayout) {
+pub fn pre_init(hob: u64, layout: &RuntimeLayout, use_shared_shadow: bool) {
     let hob = hob::init(hob).expect("Invalid payload HOB");
 
     let memory_map = init_ram(hob).expect("Failed to parse E820 table from payload HOB");
@@ -35,7 +39,13 @@
     init_heap(heap, layout.heap_size);
 
     let shared = get_usable(layout.shared_memory_size).expect("Failed to allocate shared memory");
-    init_shared_memory(shared, layout.shared_memory_size);
+    if use_shared_shadow {
+        let shadow =
+            get_usable(layout.shared_memory_size).expect("Failed to allocate shared shadow");
+        init_shared_memory_with_shadow(shared, layout.shared_memory_size, shadow);
+    } else {
+        init_shared_memory(shared, layout.shared_memory_size);
+    }
 
     // Init Global Descriptor Table and Task State Segment
     gdt::init_gdt();
diff --git a/td-payload/src/mm/shared.rs b/td-payload/src/mm/shared.rs
index f610034e..86e40cb5 100644
--- a/td-payload/src/mm/shared.rs
+++ b/td-payload/src/mm/shared.rs
@@ -10,35 +10,38 @@ use super::SIZE_4K;
 use crate::arch::shared::decrypt;
 
 static SHARED_MEMORY_ALLOCATOR: LockedHeap = LockedHeap::empty();
-static SHADOW_OFFSET: Once<usize> = Once::new();
+static SHARED_START: Once<usize> = Once::new();
+static SHADOW_START: Once<usize> = Once::new();
 
 pub fn init_shared_memory(start: u64, size: usize) {
     if size % SIZE_4K != 0 {
         panic!("Failed to initialize shared memory: size needs to be aligned with 0x1000");
     }
-    let shared_size = size / 2;
 
     // Set the shared memory region to be shared
-    decrypt(start, shared_size);
+    decrypt(start, size);
     // Initialize the shared memory allocator
     unsafe {
-        SHARED_MEMORY_ALLOCATOR
-            .lock()
-            .init(start as *mut u8, shared_size);
+        SHARED_MEMORY_ALLOCATOR.lock().init(start as *mut u8, size);
     }
-    SHADOW_OFFSET.call_once(|| shared_size);
+}
+
+pub fn init_shared_memory_with_shadow(start: u64, size: usize, shadow_start: u64) {
+    init_shared_memory(start, size);
+    SHARED_START.call_once(|| start as usize);
+    SHADOW_START.call_once(|| shadow_start as usize);
 }
 
 pub struct SharedMemory {
     addr: usize,
-    shadow_addr: usize,
+    shadow_addr: Option<usize>,
     size: usize,
 }
 
 impl SharedMemory {
     pub fn new(num_page: usize) -> Option<Self> {
         let addr = unsafe { alloc_shared_pages(num_page)? };
-        let shadow_addr = alloc_private_shadow_pages(addr)?;
+        let shadow_addr = alloc_private_shadow_pages(addr);
 
         Some(Self {
             addr,
@@ -47,12 +50,13 @@ impl SharedMemory {
         })
     }
 
-    pub fn copy_to_private_shadow(&mut self) -> &[u8] {
-        let shadow =
-            unsafe { core::slice::from_raw_parts_mut(self.shadow_addr as *mut u8, self.size) };
-        shadow.copy_from_slice(self.as_bytes());
+    pub fn copy_to_private_shadow(&mut self) -> Option<&[u8]> {
+        self.shadow_addr.map(|addr| {
+            let shadow = unsafe { core::slice::from_raw_parts_mut(addr as *mut u8, self.size) };
+            shadow.copy_from_slice(self.as_bytes());
 
-        shadow
+            &shadow[..]
+        })
     }
 
     pub fn as_bytes(&self) -> &[u8] {
@@ -110,5 +114,6 @@ pub unsafe fn free_shared_page(addr: usize) {
 }
 
 fn alloc_private_shadow_pages(shared_addr: usize) -> Option<usize> {
-    Some(shared_addr + SHADOW_OFFSET.get()?)
+    let offset = shared_addr.checked_sub(*SHARED_START.get()?)?;
+    Some(SHADOW_START.get()? + offset)
 }