diff --git a/td-payload/src/mm/shared.rs b/td-payload/src/mm/shared.rs
index ebec405f..6a2690d1 100644
--- a/td-payload/src/mm/shared.rs
+++ b/td-payload/src/mm/shared.rs
@@ -9,31 +9,52 @@ use super::SIZE_4K;
 use crate::arch::shared::decrypt;
 
 static SHARED_MEMORY_ALLOCATOR: LockedHeap = LockedHeap::empty();
+static PRIVATE_SHADOW_ALLOCATOR: LockedHeap = LockedHeap::empty();
 
 pub fn init_shared_memory(start: u64, size: usize) {
+    let shadow_size = size / 2;
+    let shared_start = start + shadow_size as u64;
+    let shared_size = size - shadow_size;
+
     // Set the shared memory region to be shared
-    decrypt(start, size);
+    decrypt(shared_start, shared_size);
     // Initialize the shared memory allocator
     unsafe {
-        SHARED_MEMORY_ALLOCATOR.lock().init(start as *mut u8, size);
+        SHARED_MEMORY_ALLOCATOR
+            .lock()
+            .init(shared_start as *mut u8, shared_size);
+        PRIVATE_SHADOW_ALLOCATOR
+            .lock()
+            .init(start as *mut u8, shadow_size);
     }
 }
 
 pub struct SharedMemory {
     addr: usize,
+    shadow_addr: usize,
     size: usize,
 }
 
 impl SharedMemory {
     pub fn new(num_page: usize) -> Option<Self> {
         let addr = unsafe { alloc_shared_pages(num_page)? };
+        let shadow_addr = unsafe { alloc_private_shadow_pages(num_page)? };
 
         Some(Self {
             addr,
+            shadow_addr,
             size: num_page * SIZE_4K,
         })
     }
 
+    pub fn copy_to_private_shadow(&mut self) -> &[u8] {
+        let shadow =
+            unsafe { core::slice::from_raw_parts_mut(self.shadow_addr as *mut u8, self.size) };
+        shadow.copy_from_slice(self.as_bytes());
+
+        shadow
+    }
+
     pub fn as_bytes(&self) -> &[u8] {
         unsafe { core::slice::from_raw_parts(self.addr as *const u8, self.size) }
     }
@@ -45,6 +66,7 @@ impl SharedMemory {
 
 impl Drop for SharedMemory {
     fn drop(&mut self) {
+        unsafe { free_private_shadow_pages(self.shadow_addr, self.size / SIZE_4K) }
         unsafe { free_shared_pages(self.addr, self.size / SIZE_4K) }
     }
 }
@@ -52,17 +74,7 @@ impl Drop for SharedMemory {
 /// # Safety
 /// The caller needs to explicitly call the `free_shared_pages` function after use
 pub unsafe fn alloc_shared_pages(num: usize) -> Option<usize> {
-    let size = SIZE_4K.checked_mul(num)?;
-
-    let addr = SHARED_MEMORY_ALLOCATOR
-        .lock()
-        .allocate_first_fit(Layout::from_size_align(size, SIZE_4K).ok()?)
-        .map(|ptr| ptr.as_ptr() as usize)
-        .ok()?;
-
-    core::slice::from_raw_parts_mut(addr as *mut u8, size).fill(0);
-
-    Some(addr)
+    allocator_alloc(&SHARED_MEMORY_ALLOCATOR, num)
 }
 
 /// # Safety
@@ -74,12 +86,7 @@ pub unsafe fn alloc_shared_page() -> Option<usize> {
 /// # Safety
 /// The caller needs to ensure the correctness of the addr and page num
 pub unsafe fn free_shared_pages(addr: usize, num: usize) {
-    let size = SIZE_4K.checked_mul(num).expect("Invalid page num");
-
-    SHARED_MEMORY_ALLOCATOR.lock().deallocate(
-        NonNull::new(addr as *mut u8).unwrap(),
-        Layout::from_size_align(size, SIZE_4K).unwrap(),
-    );
+    allocator_free(&SHARED_MEMORY_ALLOCATOR, addr, num)
 }
 
 /// # Safety
@@ -87,3 +94,38 @@ pub unsafe fn free_shared_pages(addr: usize, num: usize) {
 pub unsafe fn free_shared_page(addr: usize) {
     free_shared_pages(addr, 1)
 }
+
+/// # Safety
+/// The caller needs to explicitly call the `free_private_shadow_pages` function after use
+unsafe fn alloc_private_shadow_pages(num: usize) -> Option<usize> {
+    allocator_alloc(&PRIVATE_SHADOW_ALLOCATOR, num)
+}
+
+/// # Safety
+/// The caller needs to ensure the correctness of the addr and page num
+unsafe fn free_private_shadow_pages(addr: usize, num: usize) {
+    allocator_free(&PRIVATE_SHADOW_ALLOCATOR, addr, num)
+}
+
+unsafe fn allocator_alloc(allocator: &LockedHeap, num: usize) -> Option<usize> {
+    let size = SIZE_4K.checked_mul(num)?;
+
+    let addr = allocator
+        .lock()
+        .allocate_first_fit(Layout::from_size_align(size, SIZE_4K).ok()?)
+        .map(|ptr| ptr.as_ptr() as usize)
+        .ok()?;
+
+    core::slice::from_raw_parts_mut(addr as *mut u8, size).fill(0);
+
+    Some(addr)
+}
+
+unsafe fn allocator_free(allocator: &LockedHeap, addr: usize, num: usize) {
+    let size = SIZE_4K.checked_mul(num).expect("Invalid page num");
+
+    allocator.lock().deallocate(
+        NonNull::new(addr as *mut u8).unwrap(),
+        Layout::from_size_align(size, SIZE_4K).unwrap(),
+    );
+}
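For context, a minimal usage sketch of the API this patch introduces. The caller, the `handle_message` and `validate_and_parse` names, and the surrounding flow are hypothetical and not part of this change; only `SharedMemory::new`, `copy_to_private_shadow`, and the module path `td_payload::mm::shared` come from the patched file. The intended pattern is: data arrives in the VMM-visible shared half of the region, then is snapshotted into the private shadow half so the host cannot modify it while it is being validated and parsed.

    use td_payload::mm::shared::SharedMemory;

    // Hypothetical caller (not part of this patch) showing the intended pattern.
    fn handle_message() -> Option<()> {
        // One 4 KiB shared page plus its private shadow page.
        let mut buf = SharedMemory::new(1)?;

        // ... the VMM/device writes the message into the shared pages here ...

        // Copy the shared contents into the private shadow and only work on that copy.
        let snapshot = buf.copy_to_private_shadow();
        validate_and_parse(snapshot)?;

        Some(())
        // Dropping `buf` returns both the shared pages and the shadow pages
        // to their respective allocators.
    }

    // Placeholder for real message validation/parsing of the stable private copy.
    fn validate_and_parse(_msg: &[u8]) -> Option<()> {
        Some(())
    }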