From 2c880444a0ed4c11051ca3d119cd931ad0270903 Mon Sep 17 00:00:00 2001 From: Graham MacDonald Date: Tue, 20 Feb 2024 21:38:48 +0000 Subject: [PATCH] Simple bitmap page allocator Replaces the freelist-based page allocator and can be used for lifetime of kernel. Signed-off-by: Graham MacDonald --- aarch64/src/kalloc.rs | 57 ----- aarch64/src/kmem.rs | 27 +-- aarch64/src/mailbox.rs | 10 +- aarch64/src/main.rs | 28 ++- aarch64/src/pagealloc.rs | 73 +++++++ aarch64/src/uartmini.rs | 1 - aarch64/src/vm.rs | 37 ++-- port/src/bitmapalloc.rs | 441 +++++++++++++++++++++++++++++++++++++++ port/src/lib.rs | 2 + port/src/mem.rs | 23 +- 10 files changed, 585 insertions(+), 114 deletions(-) delete mode 100644 aarch64/src/kalloc.rs create mode 100644 aarch64/src/pagealloc.rs create mode 100644 port/src/bitmapalloc.rs diff --git a/aarch64/src/kalloc.rs b/aarch64/src/kalloc.rs deleted file mode 100644 index 973ae3b..0000000 --- a/aarch64/src/kalloc.rs +++ /dev/null @@ -1,57 +0,0 @@ -use crate::vm::Page4K; -use core::ptr; -use port::{ - mcslock::{Lock, LockNode}, - mem::PAGE_SIZE_4K, -}; - -static FREE_LIST: Lock = Lock::new("kmem", FreeList { next: None }); - -#[repr(align(4096))] -struct FreeList { - next: Option>, -} -unsafe impl Send for FreeList {} - -#[derive(Debug)] -pub enum Error { - NoFreeBlocks, -} - -impl FreeList { - pub fn put(&mut self, page: &mut Page4K) { - let ptr = (page as *mut Page4K).addr(); - assert_eq!(ptr % PAGE_SIZE_4K, 0, "freeing unaligned page"); - page.scribble(); - let f = page as *mut Page4K as *mut FreeList; - unsafe { - ptr::write(f, FreeList { next: self.next }); - } - self.next = ptr::NonNull::new(f); - } - - pub fn get(&mut self) -> Result<&'static mut Page4K, Error> { - let mut next = self.next.ok_or(Error::NoFreeBlocks)?; - let next = unsafe { next.as_mut() }; - self.next = next.next; - let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) }; - pg.clear(); - Ok(pg) - } -} - -pub unsafe fn free_pages(pages: &mut [Page4K]) { - static mut NODE: LockNode = LockNode::new(); - let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) }); - let fl = &mut *lock; - for page in pages.iter_mut() { - fl.put(page); - } -} - -pub fn alloc() -> Result<&'static mut Page4K, Error> { - static mut NODE: LockNode = LockNode::new(); - let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) }); - let fl = &mut *lock; - fl.get() -} diff --git a/aarch64/src/kmem.rs b/aarch64/src/kmem.rs index 14e8cef..e7a7a28 100644 --- a/aarch64/src/kmem.rs +++ b/aarch64/src/kmem.rs @@ -1,7 +1,5 @@ -use port::mem::PhysAddr; - -use crate::{param::KZERO, vm::Page4K}; -use core::{mem, slice}; +use crate::param::KZERO; +use port::mem::{PhysAddr, PhysRange}; // These map to definitions in kernel.ld extern "C" { @@ -52,20 +50,9 @@ pub fn from_ptr_to_physaddr(a: *const T) -> PhysAddr { from_virt_to_physaddr(a.addr()) } -unsafe fn page_slice_mut<'a>(pstart: *mut Page4K, pend: *mut Page4K) -> &'a mut [Page4K] { - let ustart = pstart.addr(); - let uend = pend.addr(); - const PAGE_SIZE: usize = mem::size_of::(); - assert_eq!(ustart % PAGE_SIZE, 0, "page_slice_mut: unaligned start page"); - assert_eq!(uend % PAGE_SIZE, 0, "page_slice_mut: unaligned end page"); - assert!(ustart < uend, "page_slice_mut: bad range"); - - let len = (uend - ustart) / PAGE_SIZE; - unsafe { slice::from_raw_parts_mut(ustart as *mut Page4K, len) } -} - -pub fn early_pages() -> &'static mut [Page4K] { - let early_start = early_pagetables_addr() as *mut Page4K; - let early_end = eearly_pagetables_addr() as *mut Page4K; - unsafe { 
page_slice_mut(early_start, early_end) } +pub fn early_pages_range() -> PhysRange { + PhysRange::new( + from_virt_to_physaddr(early_pagetables_addr()), + from_virt_to_physaddr(eearly_pagetables_addr()), + ) } diff --git a/aarch64/src/mailbox.rs b/aarch64/src/mailbox.rs index 587c2df..51ed579 100644 --- a/aarch64/src/mailbox.rs +++ b/aarch64/src/mailbox.rs @@ -4,7 +4,7 @@ use core::mem; use core::mem::MaybeUninit; use port::fdt::DeviceTree; use port::mcslock::{Lock, LockNode}; -use port::mem::VirtRange; +use port::mem::{PhysAddr, PhysRange, VirtRange}; const MBOX_READ: usize = 0x00; const MBOX_STATUS: usize = 0x18; @@ -191,7 +191,7 @@ pub struct MemoryInfo { pub end: u32, } -pub fn get_arm_memory() -> MemoryInfo { +pub fn get_arm_memory() -> PhysRange { let tags = Tag:: { tag_id0: TagId::GetArmMemory, tag_buffer_size0: 12, @@ -204,10 +204,10 @@ pub fn get_arm_memory() -> MemoryInfo { let size = res.size; let end = start + size; - MemoryInfo { start, size, end } + PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64)) } -pub fn get_vc_memory() -> MemoryInfo { +pub fn get_vc_memory() -> PhysRange { let tags = Tag:: { tag_id0: TagId::GetVcMemory, tag_buffer_size0: 12, @@ -220,7 +220,7 @@ pub fn get_vc_memory() -> MemoryInfo { let size = res.size; let end = start + size; - MemoryInfo { start, size, end } + PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64)) } pub fn get_firmware_revision() -> u32 { diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs index 4e10056..fee031c 100644 --- a/aarch64/src/main.rs +++ b/aarch64/src/main.rs @@ -5,15 +5,16 @@ #![feature(alloc_error_handler)] #![feature(asm_const)] #![feature(core_intrinsics)] +#![feature(inline_const)] #![feature(stdsimd)] #![feature(strict_provenance)] #![forbid(unsafe_op_in_unsafe_fn)] mod devcons; mod io; -mod kalloc; mod kmem; mod mailbox; +mod pagealloc; mod param; mod registers; mod trap; @@ -39,7 +40,7 @@ unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_v let start = start as *const _ as u64; let end = end as *const _ as u64; let size = end - start; - println!(" {name}{start:#x}-{end:#x} ({size:#x})"); + println!(" {name}{start:#x}..{end:#x} ({size:#x})"); } fn print_binary_sections() { @@ -67,12 +68,17 @@ fn print_binary_sections() { } } -fn print_physical_memory_map() { +fn print_memory_info() { println!("Physical memory map:"); - let mailbox::MemoryInfo { start, size, end } = mailbox::get_arm_memory(); - println!(" Memory:\t{start:#018x}-{end:#018x} ({size:#x})"); - let mailbox::MemoryInfo { start, size, end } = mailbox::get_vc_memory(); - println!(" Video:\t{start:#018x}-{end:#018x} ({size:#x})"); + let arm_mem = mailbox::get_arm_memory(); + println!(" Memory:\t{arm_mem} ({:#x})", arm_mem.size()); + let vc_mem = mailbox::get_vc_memory(); + println!(" Video:\t{vc_mem} ({:#x})", vc_mem.size()); + + println!("Memory usage::"); + let (used, total) = pagealloc::usage_bytes(); + println!(" Used:\t\t{used:#016x}"); + println!(" Total:\t{total:#016x}"); } // https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc @@ -121,15 +127,15 @@ pub extern "C" fn main9(dtb_va: usize) { // Map address space accurately using rust VM code to manage page tables unsafe { - kalloc::free_pages(kmem::early_pages()); - let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size()); - vm::init(&dt, &mut *ptr::addr_of_mut!(KPGTBL), dtb_range); + vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, 
mailbox::get_arm_memory());
         vm::switch(&*ptr::addr_of!(KPGTBL));
     }
 
+    // From this point we can use the global allocator
+
     print_binary_sections();
-    print_physical_memory_map();
+    print_memory_info();
     print_board_info();
 
     kernel_root().print_recursive_tables();
 
diff --git a/aarch64/src/pagealloc.rs b/aarch64/src/pagealloc.rs
new file mode 100644
index 0000000..bba2d0a
--- /dev/null
+++ b/aarch64/src/pagealloc.rs
@@ -0,0 +1,73 @@
+/// This module acts as an interface between the portable allocator and the
+/// arch-specific use of it.
+///
+/// The page allocator is constructed and finalised in a number of phases:
+/// 1. `init_page_allocator` to create a fixed size allocator assuming everything
+///    is in use except a small number of statically defined pages available for
+///    setting up the initial page tables.
+/// 2. `free_unused_ranges` to mark available ranges as the inverse of the
+///    physical memory map within the bounds of the available memory.
+use crate::kmem;
+use crate::kmem::physaddr_as_ptr_mut;
+use crate::vm::Page4K;
+use port::bitmapalloc::BitmapPageAlloc;
+use port::bitmapalloc::BitmapPageAllocError;
+use port::mem::PhysRange;
+use port::{
+    mcslock::{Lock, LockNode},
+    mem::PAGE_SIZE_4K,
+};
+
+/// Set up bitmap page allocator assuming everything is allocated.
+static PAGE_ALLOC: Lock<BitmapPageAlloc<16, PAGE_SIZE_4K>> = Lock::new(
+    "page_alloc",
+    const { BitmapPageAlloc::<16, PAGE_SIZE_4K>::new_all_allocated(PAGE_SIZE_4K) },
+);
+
+/// The bitmap allocator has all pages marked as allocated initially. We'll
+/// add some pages (mark free) to allow us to set up the page tables and build
+/// a memory map. Once the memory map has been built, we can mark all the unused
+/// space as available. This allows us to use only one page allocator throughout.
+pub fn init_page_allocator() {
+    let node = LockNode::new();
+    let mut lock = PAGE_ALLOC.lock(&node);
+    let page_alloc = &mut *lock;
+
+    let early_pages_range = kmem::early_pages_range();
+    if let Err(err) = page_alloc.mark_free(&early_pages_range) {
+        panic!("Couldn't mark early pages free: range: {} err: {:?}", early_pages_range, err);
+    }
+}
+
+/// Free unused pages in mem that aren't covered by the memory map. Assumes
+/// that custom_map is sorted.
+pub fn free_unused_ranges<'a>(
+    available_mem: &PhysRange,
+    used_ranges: impl Iterator<Item = &'a PhysRange>,
+) -> Result<(), BitmapPageAllocError> {
+    let node = LockNode::new();
+    let mut lock = PAGE_ALLOC.lock(&node);
+    let page_alloc = &mut *lock;
+
+    page_alloc.free_unused_ranges(available_mem, used_ranges)
+}
+
+/// Try to allocate a page
+pub fn allocate() -> Result<&'static mut Page4K, BitmapPageAllocError> {
+    let node = LockNode::new();
+    let mut lock = PAGE_ALLOC.lock(&node);
+    let page_alloc = &mut *lock;
+
+    match page_alloc.allocate() {
+        Ok(page_pa) => Ok(unsafe { &mut *physaddr_as_ptr_mut::<Page4K>(page_pa) }),
+        Err(err) => Err(err),
+    }
+}
+
+/// Return a tuple of (bytes used, total bytes available) based on the page allocator.
+pub fn usage_bytes() -> (usize, usize) {
+    let node = LockNode::new();
+    let mut lock = PAGE_ALLOC.lock(&node);
+    let page_alloc = &mut *lock;
+    page_alloc.usage_bytes()
+}
diff --git a/aarch64/src/uartmini.rs b/aarch64/src/uartmini.rs
index bc14ca9..687c8d2 100644
--- a/aarch64/src/uartmini.rs
+++ b/aarch64/src/uartmini.rs
@@ -11,7 +11,6 @@ use crate::registers::{
 /// MiniUart is assigned to UART1 on the Raspberry Pi. It is easier to use with
 /// real hardware, as it requires no additional configuration. Conversely, it's
 /// harder to use with QEMU, as it can't be used with the `nographic` switch.
-#[allow(dead_code)]
 pub struct MiniUart {
     pub gpio_range: VirtRange,
     pub aux_range: VirtRange,
 }
diff --git a/aarch64/src/vm.rs b/aarch64/src/vm.rs
index b3e8428..411332c 100644
--- a/aarch64/src/vm.rs
+++ b/aarch64/src/vm.rs
@@ -1,11 +1,11 @@
 #![allow(non_upper_case_globals)]
 use crate::{
-    kalloc,
     kmem::{
         ebss_addr, erodata_addr, etext_addr, from_ptr_to_physaddr, from_virt_to_physaddr,
         physaddr_as_ptr_mut, physaddr_as_virt, text_addr,
     },
+    pagealloc,
     registers::rpi_mmio,
 };
 use bitstruct::bitstruct;
@@ -13,7 +13,7 @@
 use core::fmt;
 use core::ptr::write_volatile;
 use num_enum::{FromPrimitive, IntoPrimitive};
 use port::{
-    fdt::DeviceTree,
+    bitmapalloc::BitmapPageAllocError,
     mem::{PhysAddr, PhysRange, PAGE_SIZE_1G, PAGE_SIZE_2M, PAGE_SIZE_4K},
 };
@@ -48,12 +48,6 @@ impl Page4K {
             core::intrinsics::volatile_set_memory(&mut self.0, 0u8, 1);
         }
     }
-
-    pub fn scribble(&mut self) {
-        unsafe {
-            core::intrinsics::volatile_set_memory(self, 0b1010_1010u8, 1);
-        }
-    }
 }
 
 #[derive(Debug, IntoPrimitive, FromPrimitive)]
@@ -79,7 +73,7 @@ pub enum AccessPermission {
 pub enum Shareable {
     #[num_enum(default)]
     Non = 0, // Non-shareable (single core)
-    Unpredictable = 1, // Unpredicatable!
+    Unpredictable = 1, // Unpredictable!
     Outer = 2, // Outer shareable (shared across CPUs, GPU)
     Inner = 3, // Inner shareable (shared across CPUs)
 }
@@ -280,13 +274,13 @@ fn recursive_table_addr(va: usize, level: Level) -> usize {
 
 #[derive(Debug)]
 pub enum PageTableError {
-    AllocationFailed(kalloc::Error),
+    AllocationFailed(BitmapPageAllocError),
     EntryIsNotTable,
     PhysRangeIsZero,
 }
 
-impl From<kalloc::Error> for PageTableError {
-    fn from(err: kalloc::Error) -> PageTableError {
+impl From<BitmapPageAllocError> for PageTableError {
+    fn from(err: BitmapPageAllocError) -> PageTableError {
         PageTableError::AllocationFailed(err)
     }
 }
@@ -331,7 +325,7 @@ impl Table {
 }
 
 fn alloc_pagetable() -> Result<&'static mut Table, PageTableError> {
-    let page = kalloc::alloc()?;
+    let page = pagealloc::allocate()?;
     page.clear();
     Ok(unsafe { &mut *(page as *mut Page4K as *mut Table) })
 }
@@ -471,7 +465,9 @@ fn print_pte(indent: usize, i: usize, level: Level, pte: Entry) {
     }
 }
 
-pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: PhysRange) {
+pub unsafe fn init(kpage_table: &mut PageTable, dtb_range: PhysRange, available_mem: PhysRange) {
+    pagealloc::init_page_allocator();
+
     // We use recursive page tables, but we have to be careful in the init call,
     // since the kpage_table is not currently pointed to by ttbr1_el1. Any
     // recursive addressing of (511, 511, 511, 511) always points to the
@@ -486,10 +482,7 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
         write_volatile(&mut kpage_table.entries[511], entry);
     }
 
-    // TODO We don't actualy unmap the first page... We should to achieve:
-    // Note that the first page is left unmapped to try and
-    // catch null pointer dereferences in unsafe code: defense
-    // in depth!
+    // TODO leave the first page unmapped to catch null pointer dereferences in unsafe code
     let custom_map = {
         let text_range =
             PhysRange(from_virt_to_physaddr(text_addr())..from_virt_to_physaddr(etext_addr()));
@@ -519,8 +512,9 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
     for (name, range, flags, page_size) in custom_map.iter() {
         let mapped_range =
             kpage_table.map_phys_range(range, *flags, *page_size).expect("init mapping failed");
+
         println!(
-            "  {:14}{:#018x}-{:#018x} to {:#018x}-{:#018x} flags: {:?} page_size: {:?}",
+            "  {:14}{:#018x}..{:#018x} to {:#018x}..{:#018x} flags: {:?} page_size: {:?}",
             name,
             range.start().addr(),
             range.end().addr(),
@@ -530,6 +524,11 @@ pub unsafe fn init(_dt: &DeviceTree, kpage_table: &mut PageTable, dtb_range: Phy
             page_size
         );
     }
+
+    if let Err(err) = pagealloc::free_unused_ranges(&available_mem, custom_map.map(|m| m.1).iter())
+    {
+        panic!("Couldn't mark unused pages as free: err: {:?}", err);
+    }
 }
 
 /// Return the root kernel page table physical address
diff --git a/port/src/bitmapalloc.rs b/port/src/bitmapalloc.rs
new file mode 100644
index 0000000..680626f
--- /dev/null
+++ b/port/src/bitmapalloc.rs
@@ -0,0 +1,441 @@
+use core::fmt;
+
+use crate::mem::{PhysAddr, PhysRange};
+
+/// Simple bitmap. Bear in mind that logically, bit 0 is the rightmost bit,
+/// so writing out as bytes will have the bits logically reversed.
+struct Bitmap<const SIZE_BYTES: usize> {
+    bytes: [u8; SIZE_BYTES],
+}
+
+impl<const SIZE_BYTES: usize> Bitmap<SIZE_BYTES> {
+    pub const fn new(init_value: u8) -> Self {
+        Self { bytes: [init_value; SIZE_BYTES] }
+    }
+
+    /// Is bit `i` within the bitmap set?
+    pub fn is_set(&self, i: usize) -> bool {
+        let byte_idx = i / 8;
+        let bit_idx = i % 8;
+        let byte = self.bytes[byte_idx];
+        byte & (1 << bit_idx) > 0
+    }
+
+    /// Set bit `i` within the bitmap
+    pub fn set(&mut self, i: usize, b: bool) {
+        let byte_idx = i / 8;
+        let bit_idx = i % 8;
+        if b {
+            self.bytes[byte_idx] |= 1 << bit_idx;
+        } else {
+            self.bytes[byte_idx] &= !(1 << bit_idx);
+        }
+    }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BitmapPageAllocError {
+    OutOfBounds,
+    MisalignedAddr,
+    OutOfSpace,
+    NotAllocated,
+}
+
+/// Allocator where each page is represented by a single bit.
+/// 0: free, 1: allocated
+/// `end` is used to indicate the extent of the memory. Anything beyond this
+/// will be marked as allocated.
+pub struct BitmapPageAlloc<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize> {
+    bitmaps: [Bitmap<BITMAP_SIZE_BYTES>; NUM_BITMAPS],
+    alloc_page_size: usize,    // Size of pages represented by single bit
+    end: PhysAddr,             // Upper bound of physical memory
+    next_pa_to_scan: PhysAddr, // PhysAddr from which to start scanning for next allocation
+}
+
+impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize>
+    BitmapPageAlloc<NUM_BITMAPS, BITMAP_SIZE_BYTES>
+{
+    pub const fn new_all_allocated(alloc_page_size: usize) -> Self {
+        let end = PhysAddr::new((NUM_BITMAPS * BITMAP_SIZE_BYTES * 8 * alloc_page_size) as u64);
+        Self {
+            bitmaps: [const { Bitmap::<BITMAP_SIZE_BYTES>::new(0xff) }; NUM_BITMAPS],
+            alloc_page_size,
+            end,
+            next_pa_to_scan: PhysAddr::new(0),
+        }
+    }
+
+    /// Returns number of physical bytes a single bitmap byte can cover.
+    const fn bytes_per_bitmap_byte(&self) -> usize {
+        8 * self.alloc_page_size
+    }
+
+    /// Returns number of physical bytes a single bitmap can cover.
+    const fn bytes_per_bitmap(&self) -> usize {
+        BITMAP_SIZE_BYTES * self.bytes_per_bitmap_byte()
+    }
+
+    /// Returns number of physical bytes covered by all bitmaps.
+    const fn max_bytes(&self) -> usize {
+        NUM_BITMAPS * self.bytes_per_bitmap()
+    }
+
+    /// Mark the bits corresponding to the given physical range as allocated,
+    /// regardless of the existing state.
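+    ///
+    /// A minimal usage sketch (sizes mirror the tests at the bottom of this
+    /// file: 2 bitmaps of 2 bytes each and 4-byte pages, i.e. 128 bytes of
+    /// "physical memory"; not compiled as a doctest):
+    ///
+    /// ```ignore
+    /// let mut alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4);
+    /// alloc.mark_free(&PhysRange::with_end(0, 128)).unwrap();
+    /// alloc.mark_allocated(&PhysRange::with_end(4, 44)).unwrap(); // the ten pages covering 4..44
+    /// ```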
+    pub fn mark_allocated(&mut self, range: &PhysRange) -> Result<(), BitmapPageAllocError> {
+        self.mark_range(range, true, true)
+    }
+
+    /// Mark the bits corresponding to the given physical range as free,
+    /// regardless of the existing state.
+    pub fn mark_free(&mut self, range: &PhysRange) -> Result<(), BitmapPageAllocError> {
+        self.mark_range(range, false, true)
+    }
+
+    /// Free unused pages in mem that aren't covered by the memory map. Assumes
+    /// that custom_map is sorted and that available_mem can be used to set the
+    /// upper bound of the allocator.
+    pub fn free_unused_ranges<'a>(
+        &mut self,
+        available_mem: &PhysRange,
+        used_ranges: impl Iterator<Item = &'a PhysRange>,
+    ) -> Result<(), BitmapPageAllocError> {
+        let mut next_start = available_mem.start();
+        for range in used_ranges {
+            if next_start < range.0.start {
+                self.mark_free(&PhysRange::new(next_start, range.0.start))?;
+            }
+            if next_start < range.0.end {
+                next_start = range.0.end;
+            }
+        }
+        if next_start < available_mem.end() {
+            self.mark_free(&PhysRange::new(next_start, available_mem.end()))?;
+        }
+
+        self.end = available_mem.0.end;
+
+        // Mark everything past the end point as allocated
+        let end_range = PhysRange::new(self.end, PhysAddr::new(self.max_bytes() as u64));
+        self.mark_range(&end_range, true, false)?;
+
+        self.next_pa_to_scan = PhysAddr::new(0); // Just set to 0 for simplicity - could be smarter
+
+        Ok(())
+    }
+
+    /// Try to allocate the next available page.
+    pub fn allocate(&mut self) -> Result<PhysAddr, BitmapPageAllocError> {
+        let (first_bitmap_idx, first_byte_idx, _) = self.physaddr_as_indices(self.next_pa_to_scan);
+
+        let found_indices = self
+            .indices_from(first_bitmap_idx, first_byte_idx)
+            .find(|indices| self.byte(indices) != 0xff);
+
+        if let Some(indices) = found_indices {
+            // Mark the page as allocated and return the address
+            let byte = &mut self.bitmaps[indices.bitmap].bytes[indices.byte];
+            let num_trailing_ones = byte.trailing_ones() as usize;
+            *byte |= 1 << num_trailing_ones;
+
+            let pa = self.indices_as_physaddr(indices.bitmap, indices.byte, num_trailing_ones);
+            self.next_pa_to_scan = pa;
+            Ok(pa)
+        } else {
+            Err(BitmapPageAllocError::OutOfSpace)
+        }
+    }
+
+    /// Deallocate the page corresponding to the given PhysAddr.
+    pub fn deallocate(&mut self, pa: PhysAddr) -> Result<(), BitmapPageAllocError> {
+        if pa > self.end {
+            return Err(BitmapPageAllocError::OutOfBounds);
+        }
+
+        let (bitmap_idx, byte_idx, bit_idx) = self.physaddr_as_indices(pa);
+
+        let bitmap = &mut self.bitmaps[bitmap_idx];
+        if !bitmap.is_set(8 * byte_idx + bit_idx) {
+            return Err(BitmapPageAllocError::NotAllocated);
+        }
+        bitmap.set(8 * byte_idx + bit_idx, false);
+
+        self.next_pa_to_scan = pa; // Next allocation will reuse this
+
+        Ok(())
+    }
+
+    /// Return a tuple of (bytes used, total bytes available) based on the page allocator.
+    pub fn usage_bytes(&self) -> (usize, usize) {
+        // We count free because the last bits might be marked partially 'allocated'
+        // if the end comes in the middle of a byte in the bitmap.
+        let mut free_bytes: usize = 0;
+        for indices in self.indices() {
+            free_bytes += self.byte(&indices).count_zeros() as usize * self.alloc_page_size;
+        }
+        let total = self.end.0 as usize;
+        (total - free_bytes, total)
+    }
+
+    /// For the given physaddr, returns a tuple of (the index of the bitmap containing pa,
+    /// the index of the byte containing the pa, and the index of the bit within that byte).
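+    ///
+    /// For example (as exercised in the `physaddr_as_indices` test below), with
+    /// 4096-byte pages and 4096-byte bitmaps, each bitmap byte covers 8 pages,
+    /// so `PhysAddr::new(4096 * 9)` maps to `(0, 1, 1)`: bitmap 0, byte 1, bit 1.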
+    fn physaddr_as_indices(&self, pa: PhysAddr) -> (usize, usize, usize) {
+        assert_eq!(pa.addr() % self.alloc_page_size as u64, 0);
+
+        // Get the index of the bitmap containing the pa
+        let bytes_per_bitmap = self.bytes_per_bitmap();
+        let bitmap_idx = pa.addr() as usize / bytes_per_bitmap;
+
+        // Get the byte within the bitmap representing the pa
+        let pa_offset_into_bitmap = pa.addr() as usize % bytes_per_bitmap;
+        let bytes_per_bitmap_byte = self.bytes_per_bitmap_byte();
+        let byte_idx = pa_offset_into_bitmap / bytes_per_bitmap_byte;
+
+        // Finally get the bit within the byte
+        let bit_idx =
+            (pa_offset_into_bitmap - (byte_idx * bytes_per_bitmap_byte)) / self.alloc_page_size;
+
+        (bitmap_idx, byte_idx, bit_idx)
+    }
+
+    /// Given the bitmap index, byte index within the bitmap, and bit index within the byte,
+    /// return the corresponding PhysAddr.
+    fn indices_as_physaddr(&self, bitmap_idx: usize, byte_idx: usize, bit_idx: usize) -> PhysAddr {
+        PhysAddr::new(
+            ((bitmap_idx * self.bytes_per_bitmap())
+                + (byte_idx * self.bytes_per_bitmap_byte())
+                + (bit_idx * self.alloc_page_size)) as u64,
+        )
+    }
+
+    fn mark_range(
+        &mut self,
+        range: &PhysRange,
+        mark_allocated: bool,
+        check_end: bool,
+    ) -> Result<(), BitmapPageAllocError> {
+        if check_end && range.0.end > self.end {
+            return Err(BitmapPageAllocError::OutOfBounds);
+        }
+
+        for pa in range.step_by_rounded(self.alloc_page_size) {
+            let (bitmap_idx, byte_idx, bit_idx) = self.physaddr_as_indices(pa);
+            if bitmap_idx >= self.bitmaps.len() {
+                return Err(BitmapPageAllocError::OutOfBounds);
+            }
+
+            let bitmap = &mut self.bitmaps[bitmap_idx];
+            bitmap.set(8 * byte_idx + bit_idx, mark_allocated);
+        }
+        Ok(())
+    }
+
+    /// Iterate over each of the bytes in turn. Iterates only over the bytes
+    /// covering pages up to `end`. If `end` is within one of the bytes, that
+    /// byte will be returned.
+    fn indices(&self) -> impl Iterator<Item = ByteIndices> + '_ {
+        self.indices_from(0, 0)
+    }
+
+    /// Iterate over each of the bytes in turn, starting from a particular bitmap
+    /// and byte, and looping to iterate across all bytes. Iterates only over the bytes
+    /// covering pages up to `end`. If `end` is within one of the bytes, that
+    /// byte will be returned.
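+    ///
+    /// For example, in the `iterate` test below `bytes_from(1, 0)` yields the two
+    /// bytes of bitmap 1 first, wraps around, and then yields the two bytes of
+    /// bitmap 0, so every byte is visited exactly once.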
+    fn indices_from(
+        &self,
+        start_bitmap_idx: usize,
+        start_byte_idx: usize,
+    ) -> impl Iterator<Item = ByteIndices> + '_ {
+        let mut bitmap_idx = start_bitmap_idx;
+        let mut byte_idx = start_byte_idx;
+        let mut passed_first = false;
+        let mut currpa = self.indices_as_physaddr(bitmap_idx, byte_idx, 0);
+
+        core::iter::from_fn(move || {
+            // Catch when we've iterated to the end of the last bitmap and need to
+            // cycle back to the start
+            if bitmap_idx >= self.bitmaps.len() || currpa >= self.end {
+                bitmap_idx = 0;
+                byte_idx = 0;
+                currpa = PhysAddr::new(0);
+            }
+
+            // Catch when we've iterated over all the bytes
+            if passed_first && bitmap_idx == start_bitmap_idx && byte_idx == start_byte_idx {
+                return None;
+            }
+            passed_first = true;
+
+            // Return the byte and prepare for the next
+            let indices = ByteIndices { bitmap: bitmap_idx, byte: byte_idx };
+            byte_idx += 1;
+            if byte_idx >= BITMAP_SIZE_BYTES {
+                byte_idx = 0;
+                bitmap_idx += 1;
+                currpa.0 += self.alloc_page_size as u64;
+            }
+            Some(indices)
+        })
+    }
+
+    fn byte(&self, indices: &ByteIndices) -> u8 {
+        self.bitmaps[indices.bitmap].bytes[indices.byte]
+    }
+
+    #[cfg(test)]
+    fn bytes(&self) -> Vec<u8> {
+        self.indices().map(|idx| self.byte(&idx)).collect::<Vec<u8>>()
+    }
+
+    #[cfg(test)]
+    fn bytes_from(&self, start_bitmap_idx: usize, start_byte_idx: usize) -> Vec<u8> {
+        self.indices_from(start_bitmap_idx, start_byte_idx)
+            .map(|idx| self.byte(&idx))
+            .collect::<Vec<u8>>()
+    }
+}
+
+struct ByteIndices {
+    bitmap: usize,
+    byte: usize,
+}
+
+/// fmt::Debug is useful in small test cases, but would be too verbose for a
+/// realistic bitmap.
+impl<const NUM_BITMAPS: usize, const BITMAP_SIZE_BYTES: usize> fmt::Debug
+    for BitmapPageAlloc<NUM_BITMAPS, BITMAP_SIZE_BYTES>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "0x")?;
+        for b in self.indices() {
+            write!(f, "{:02x}", self.byte(&b))?;
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn bitmap_new() {
+        let bitmap = Bitmap::<4096>::new(0);
+        for byte in bitmap.bytes {
+            assert_eq!(byte, 0x00);
+        }
+    }
+
+    #[test]
+    fn bitmap_set() {
+        let mut bitmap = Bitmap::<4096>::new(0);
+        assert!(!bitmap.is_set(0));
+        bitmap.set(0, true);
+        assert!(bitmap.is_set(0));
+
+        // Assert only this bit is set
+        assert_eq!(bitmap.bytes[0], 1);
+        for i in 1..bitmap.bytes.len() {
+            assert_eq!(bitmap.bytes[i], 0);
+        }
+    }
+
+    #[test]
+    fn iterate() {
+        let alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4);
+        assert_eq!(alloc.bytes(), vec![255; 4]);
+        assert_eq!(alloc.bytes_from(1, 0), vec![255; 4]);
+    }
+
+    #[test]
+    fn bitmappagealloc_mark_allocated_and_free() -> Result<(), BitmapPageAllocError> {
+        // Create a new allocator and mark it all freed
+        // 2 bitmaps, 2 bytes per bitmap, mapped to pages of 4 bytes
+        // 32 bits, 128 bytes physical memory
+        let mut alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4);
+        alloc.mark_free(&PhysRange::with_end(0, alloc.max_bytes() as u64))?;
+
+        // Mark a range as allocated - 10 bits
+        alloc.mark_allocated(&PhysRange::with_end(4, 44))?;
+        assert_eq!(alloc.bytes(), [0xfe, 0x07, 0x00, 0x00]);
+
+        // Deallocate a range - first 2 bits
+        alloc.mark_free(&PhysRange::with_end(0, 8))?;
+        assert_eq!(alloc.bytes(), [0xfc, 0x07, 0x00, 0x00]);
+        Ok(())
+    }
+
+    #[test]
+    fn bitmappagealloc_allocate_and_deallocate() -> Result<(), BitmapPageAllocError> {
+        // Create a new allocator and mark it all freed
+        // 2 bitmaps, 2 bytes per bitmap, mapped to pages of 4 bytes
+        // 32 bits, 128 bytes physical memory
+        let mut alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4);
+        alloc.mark_free(&PhysRange::with_end(0, alloc.max_bytes() as u64))?;
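+        // Capacity is 2 bitmaps x 2 bytes x 8 bits x 4-byte pages = 128 bytes, all free at this point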
+        assert_eq!(alloc.usage_bytes(), (0, 128));
+
+        // Mark a range as allocated - 10 bits
+        alloc.mark_allocated(&PhysRange::with_end(4, 44))?;
+        assert_eq!(alloc.usage_bytes(), (40, 128));
+        assert_eq!(alloc.bytes(), [0xfe, 0x07, 0x00, 0x00]);
+
+        // Now try to allocate the next 3 free pages
+        assert_eq!(alloc.allocate()?, PhysAddr::new(0));
+        assert_eq!(alloc.allocate()?, PhysAddr::new(44));
+        assert_eq!(alloc.allocate()?, PhysAddr::new(48));
+
+        // Allocate until we run out of pages. At this point there are 19 pages left,
+        // so allocate them, and then assert one more fails
+        for _ in 0..19 {
+            alloc.allocate()?;
+        }
+        assert_eq!(alloc.bytes(), [0xff, 0xff, 0xff, 0xff]);
+        assert_eq!(alloc.allocate().unwrap_err(), BitmapPageAllocError::OutOfSpace);
+
+        // Now try to deallocate the second page
+        assert!(alloc.deallocate(PhysAddr::new(4)).is_ok());
+        assert_eq!(alloc.bytes(), [0xfd, 0xff, 0xff, 0xff]);
+
+        // Ensure double deallocation fails
+        assert_eq!(
+            alloc.deallocate(PhysAddr::new(4)).unwrap_err(),
+            BitmapPageAllocError::NotAllocated
+        );
+        assert_eq!(alloc.bytes(), [0xfd, 0xff, 0xff, 0xff]);
+
+        // Allocate once more, expecting the physical address we just deallocated
+        assert_eq!(alloc.allocate()?, PhysAddr::new(4));
+
+        Ok(())
+    }
+
+    #[test]
+    fn physaddr_as_indices() {
+        let alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096);
+        let bytes_per_bitmap = alloc.bytes_per_bitmap() as u64;
+
+        assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(0)), (0, 0, 0));
+        assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096)), (0, 0, 1));
+        assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(8192)), (0, 0, 2));
+        assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096 * 8)), (0, 1, 0));
+        assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096 * 9)), (0, 1, 1));
+        assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(bytes_per_bitmap)), (1, 0, 0));
+        assert_eq!(
+            alloc.physaddr_as_indices(PhysAddr::new(bytes_per_bitmap + 4096 * 9)),
+            (1, 1, 1)
+        );
+    }
+
+    #[test]
+    fn indices_as_physaddr() {
+        let alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096);
+        let bytes_per_bitmap = alloc.bytes_per_bitmap() as u64;
+
+        assert_eq!(alloc.indices_as_physaddr(0, 0, 0), PhysAddr::new(0));
+        assert_eq!(alloc.indices_as_physaddr(0, 0, 1), PhysAddr::new(4096));
+        assert_eq!(alloc.indices_as_physaddr(0, 1, 0), PhysAddr::new(4096 * 8));
+        assert_eq!(alloc.indices_as_physaddr(0, 1, 1), PhysAddr::new(4096 * 9));
+        assert_eq!(alloc.indices_as_physaddr(1, 0, 0), PhysAddr::new(bytes_per_bitmap));
+        assert_eq!(alloc.indices_as_physaddr(1, 1, 1), PhysAddr::new(bytes_per_bitmap + 4096 * 9));
+    }
+}
diff --git a/port/src/lib.rs b/port/src/lib.rs
index aca277a..9fa7e0d 100644
--- a/port/src/lib.rs
+++ b/port/src/lib.rs
@@ -1,9 +1,11 @@
 #![allow(clippy::upper_case_acronyms)]
 #![cfg_attr(not(any(test)), no_std)]
+#![feature(inline_const)]
 #![feature(maybe_uninit_slice)]
 #![feature(step_trait)]
 #![forbid(unsafe_op_in_unsafe_fn)]
 
+pub mod bitmapalloc;
 pub mod dat;
 pub mod devcons;
 pub mod fdt;
diff --git a/port/src/mem.rs b/port/src/mem.rs
index d232c0c..d9f7087 100644
--- a/port/src/mem.rs
+++ b/port/src/mem.rs
@@ -44,7 +44,7 @@ impl From<&RegBlock> for VirtRange {
 
 #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)]
 #[repr(transparent)]
-pub struct PhysAddr(u64);
+pub struct PhysAddr(pub u64);
 
 impl PhysAddr {
     pub const fn new(value: u64) -> Self {
@@ -56,10 +56,12 @@ impl PhysAddr {
     }
 
     pub const fn round_up(&self, step: u64) -> PhysAddr {
+        assert!(step.is_power_of_two());
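+        // e.g. rounding 0x1234 up to a step of 0x1000 gives 0x2000: add step - 1, then mask off the low bits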
         PhysAddr((self.0 + step - 1) & !(step - 1))
     }
 
     pub const fn round_down(&self, step: u64) -> PhysAddr {
+        assert!(step.is_power_of_two());
         PhysAddr(self.0 & !(step - 1))
     }
 }
@@ -104,6 +106,14 @@ impl fmt::Debug for PhysAddr {
 pub struct PhysRange(pub Range<PhysAddr>);
 
 impl PhysRange {
+    pub fn new(start: PhysAddr, end: PhysAddr) -> Self {
+        Self(start..end)
+    }
+
+    pub fn with_end(start: u64, end: u64) -> Self {
+        Self(PhysAddr(start)..PhysAddr(end))
+    }
+
     pub fn with_len(start: u64, len: usize) -> Self {
         Self(PhysAddr(start)..PhysAddr(start + len as u64))
     }
@@ -126,6 +136,10 @@ impl PhysRange {
         self.0.end
     }
 
+    pub fn size(&self) -> usize {
+        (self.0.end.addr() - self.0.start.addr()) as usize
+    }
+
     pub fn step_by_rounded(&self, step_size: usize) -> StepBy<Range<PhysAddr>> {
         let startpa = self.start().round_down(step_size as u64);
         let endpa = self.end().round_up(step_size as u64);
@@ -133,6 +147,13 @@ impl PhysRange {
     }
 }
 
+impl fmt::Display for PhysRange {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:#016x}..{:#016x}", self.0.start.addr(), self.0.end.addr())?;
+        Ok(())
+    }
+}
+
 impl From<&RegBlock> for PhysRange {
     fn from(r: &RegBlock) -> Self {
         let start = PhysAddr(r.addr);