diff --git a/aarch64/src/main.rs b/aarch64/src/main.rs
index 2edce89..70f317e 100644
--- a/aarch64/src/main.rs
+++ b/aarch64/src/main.rs
@@ -3,7 +3,6 @@
 #![cfg_attr(not(any(test)), no_std)]
 #![cfg_attr(not(test), no_main)]
 #![feature(alloc_error_handler)]
-#![feature(asm_const)]
 #![feature(core_intrinsics)]
 #![feature(strict_provenance)]
 #![forbid(unsafe_op_in_unsafe_fn)]
diff --git a/aarch64/src/runtime.rs b/aarch64/src/runtime.rs
index aa65a36..18ba3f3 100644
--- a/aarch64/src/runtime.rs
+++ b/aarch64/src/runtime.rs
@@ -5,7 +5,7 @@ extern crate alloc;
 use crate::kmem::physaddr_as_virt;
 use crate::registers::rpi_mmio;
 use crate::uartmini::MiniUart;
-use alloc::alloc::{GlobalAlloc, Layout};
+use alloc::alloc::Layout;
 use core::fmt::Write;
 use core::panic::PanicInfo;
 use port::devcons::PanicConsole;
@@ -39,17 +39,3 @@ pub fn panic(info: &PanicInfo) -> ! {
 fn oom(_layout: Layout) -> ! {
     panic!("oom");
 }
-
-struct FakeAlloc;
-
-unsafe impl GlobalAlloc for FakeAlloc {
-    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
-        panic!("fake alloc");
-    }
-    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
-        panic!("fake dealloc");
-    }
-}
-
-#[global_allocator]
-static FAKE_ALLOCATOR: FakeAlloc = FakeAlloc {};
diff --git a/lib/aarch64-unknown-none-elf.json b/lib/aarch64-unknown-none-elf.json
index ee81954..88d0a74 100644
--- a/lib/aarch64-unknown-none-elf.json
+++ b/lib/aarch64-unknown-none-elf.json
@@ -1,6 +1,6 @@
 {
   "arch": "aarch64",
-  "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
+  "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
   "disable-redzone": true,
   "executables": true,
   "features": "+strict-align,+neon,+fp-armv8",
@@ -16,4 +16,4 @@
     "-nostdlib"
   ]
 }
-}
\ No newline at end of file
+}
diff --git a/port/src/allocator.rs b/port/src/allocator.rs
new file mode 100644
index 0000000..494da14
--- /dev/null
+++ b/port/src/allocator.rs
@@ -0,0 +1,534 @@
+// Copyright 2021 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use alloc::alloc::{AllocError, Allocator, Layout};
+use core::ptr::NonNull;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{mem, ptr};
+
+/// The allocator works in terms of an owned region of memory
+/// that is represented by a Block, which describes the region
+/// in terms of a non-nil pointer and a length. A Block is an
+/// analogue of a mutable slice.
+///
+/// At some point, it may make sense to replace this with a
+/// slice pointer, but too many of the interfaces there are not
+/// (yet) stable.
+#[derive(Clone, Copy, Debug)]
+pub struct Block {
+    ptr: NonNull<u8>,
+    len: usize,
+}
+
+impl Block {
+    /// Creates a new block from raw parts. This is analogous
+    /// to `core::slice::from_raw_parts`.
+    ///
+    /// # Safety
+    /// The caller must ensure that the pointer and length given
+    /// are appropriate for the construction of a new block.
+    pub const unsafe fn new_from_raw_parts(ptr: *mut u8, len: usize) -> Block {
+        let ptr = unsafe { NonNull::new_unchecked(ptr) };
+        Block { ptr, len }
+    }
+
+    /// Splits a block into two sub-blocks.
+    pub fn split_at_mut(self, offset: usize) -> Option<(Block, Block)> {
+        let len = self.len();
+        if offset > len {
+            return None;
+        }
+        let ptr = self.as_ptr();
+        let a = unsafe { Block::new_from_raw_parts(ptr, offset) };
+        let b = unsafe { Block::new_from_raw_parts(ptr.wrapping_add(offset), len - offset) };
+        Some((a, b))
+    }
+
+    /// Returns a raw mutable pointer to the beginning of the
+    /// owned region.
+    pub fn as_ptr(self) -> *mut u8 {
+        self.ptr.as_ptr()
+    }
+
+    /// Returns the length of the region.
+    fn len(self) -> usize {
+        self.len
+    }
+}
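+
+// Example (illustrative only): splitting a block yields two views of
+// the same region; `ptr` here is an assumed pointer valid for 256
+// bytes.
+//
+//     let buf = unsafe { Block::new_from_raw_parts(ptr, 256) };
+//     let (lo, hi) = buf.split_at_mut(64).expect("offset is in bounds");
+//     // `lo` covers bytes [0, 64) of the region; `hi` covers [64, 256).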
+
+/// A Bump Allocator takes ownership of a region of memory,
+/// called an "arena", represented by a Block, and maintains a
+/// cursor into that region. The cursor denotes the point
+/// between allocated and unallocated memory in the arena.
+pub struct BumpAlloc {
+    arena: Block,
+    cursor: AtomicUsize,
+}
+
+impl BumpAlloc {
+    /// Creates a new bump allocator over the given Block.
+    /// Takes ownership of the provided region.
+    pub const fn new(arena: Block) -> BumpAlloc {
+        BumpAlloc { arena, cursor: AtomicUsize::new(0) }
+    }
+
+    /// Allocates the requested number of bytes with the given
+    /// alignment. Returns `None` if the allocation cannot be
+    /// satisfied, otherwise returns `Some` of a pair of blocks:
+    /// the first contains the prefix before the (aligned) block
+    /// and the second is the requested block itself.
+    pub fn try_alloc(&self, align: usize, size: usize) -> Option<(Block, Block)> {
+        let base = self.arena.as_ptr();
+        let mut first = ptr::null_mut();
+        let mut adjust = 0;
+        self.cursor
+            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |current| {
+                first = base.wrapping_add(current);
+                adjust = first.align_offset(align);
+                let offset = current.checked_add(adjust).expect("alignment overflow");
+                let next = offset.checked_add(size).expect("size overflow");
+                (next <= self.arena.len()).then_some(next)
+            })
+            .ok()?;
+        let prefix = unsafe { Block::new_from_raw_parts(first, adjust) };
+        let ptr = first.wrapping_add(adjust);
+        let block = unsafe { Block::new_from_raw_parts(ptr, size) };
+        Some((prefix, block))
+    }
+}
+
+/// BumpAlloc implements the allocator interface, and is
+/// suitable for e.g. page allocators and so forth. Dealloc is
+/// unimplemented and will panic.
+unsafe impl Allocator for BumpAlloc {
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        let (_, block) = self.try_alloc(layout.align(), layout.size()).ok_or(AllocError)?;
+        Ok(NonNull::slice_from_raw_parts(block.ptr, block.len()))
+    }
+
+    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
+        unimplemented!();
+    }
+}
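+
+// Example (illustrative only): a BumpAlloc can be used standalone over
+// a static arena; `ARENA` is an assumed name, not part of this module.
+//
+//     static mut ARENA: [u8; 4096] = [0; 4096];
+//     let bump = unsafe {
+//         BumpAlloc::new(Block::new_from_raw_parts((&raw mut ARENA).cast(), 4096))
+//     };
+//     // Request 128 bytes aligned to 64; the first block of the pair
+//     // is the (possibly empty) prefix skipped to reach alignment.
+//     let (_prefix, block) = bump.try_alloc(64, 128).expect("arena has space");
+//     assert_eq!(block.as_ptr().align_offset(64), 0);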
+
+// # QuickFit allocator for small objects.
+//
+// This is an implementation of the QuickFit[Wei88] allocator
+// for small objects, suitable for managing small heaps in
+// memory-constrained environments, such as boot loaders and
+// standalone debuggers.
+//
+// [Wei88] Charles B. Weinstock and William A. Wulf. 1988.
+// Quick Fit: An Efficient Algorithm for Heap Storage
+// Allocation. ACM SIGPLAN Notices 23, 10 (Oct. 1988),
+// 141-148. https://doi.org/10.1145/51607.51619
+
+const ALLOC_UNIT_SHIFT: usize = 6;
+const ALLOC_UNIT_SIZE: usize = 1 << ALLOC_UNIT_SHIFT;
+const MIN_ALLOC_SIZE: usize = ALLOC_UNIT_SIZE;
+const MAX_QUICK_SHIFT: usize = 14;
+const MAX_QUICK_SIZE: usize = 1 << MAX_QUICK_SHIFT;
+
+const NUM_QLISTS: usize = 14 - ALLOC_UNIT_SHIFT + 1;
+const NUM_HASH_BUCKETS: usize = 31; // Prime.
+
+/// A linked block header containing size, alignment, and
+/// address information for the block. This is used both for
+/// linking unallocated blocks into one of the free lists and
+/// for keeping track of blocks allocated from the `misc` list.
+///
+/// For irregularly sized allocations, the header keeps track of
+/// the block's layout data, its virtual address, and a link
+/// pointer. Such a header is either not in any list, if newly
+/// allocated and not yet freed, or always in exactly one of two
+/// lists: the free list, or a hash chain of allocated blocks.
+/// We do this because we need some way to preserve the
+/// allocation size after the initial allocation from the tail:
+/// misc blocks can be reused in a first-fit manner, so we
+/// cannot rely on a `Layout` to recover the size of the block
+/// and must store it somewhere. By allocating a tag outside of
+/// the buffer, which we look up in a hash table as needed, we
+/// can maintain this information without adding additional
+/// complexity to allocation.
+///
+/// For blocks on one of the quick lists, the size, address and
+/// alignment fields are redundant, but convenient.
+///
+/// We use the link pointer to point to the next entry in the
+/// list in all cases.
+#[derive(Debug)]
+#[repr(C, align(64))]
+struct Header {
+    next: Option<NonNull<Header>>,
+    addr: NonNull<u8>,
+    size: usize,
+    align: usize,
+}
+
+impl Header {
+    /// Returns a new header for a block of the given size and
+    /// alignment at the given address.
+    fn new(addr: NonNull<u8>, size: usize, align: usize, next: Option<NonNull<Header>>) -> Header {
+        Header { next, addr, size, align }
+    }
+}
+
+/// The QuickFit allocator itself. The allocator takes
+/// ownership of a bump allocator for the tail, and contains a
+/// set of lists for the quick blocks, as well as a misc list
+/// for unusually sized regions, and a hash table of headers
+/// describing current misc allocations. As mentioned above,
+/// these last data are kept outside of the allocations to keep
+/// allocation simple.
+#[repr(C)]
+pub struct QuickFit {
+    tail: BumpAlloc,
+    qlists: [Option<NonNull<Header>>; NUM_QLISTS],
+    misc: Option<NonNull<Header>>,
+    allocated_misc: [Option<NonNull<Header>>; NUM_HASH_BUCKETS],
+}
+
+impl QuickFit {
+    /// Constructs a QuickFit from the given `tail`.
+    pub const fn new(tail: BumpAlloc) -> QuickFit {
+        let qlists = [None; NUM_QLISTS];
+        let misc = None;
+        let allocated_misc = [None; NUM_HASH_BUCKETS];
+        QuickFit { tail, qlists, misc, allocated_misc }
+    }
+
+    /// Allocates a block of memory of the requested size and
+    /// alignment. Returns a pointer to such a block, or nil if
+    /// the block cannot be allocated.
+    pub fn malloc(&mut self, layout: Layout) -> *mut u8 {
+        let (size, align) = Self::adjust(layout);
+        let p = self.alloc_quick(size, align);
+        p.or_else(|| self.alloc_tail(size, align)).map(|p| p.as_ptr()).unwrap_or(ptr::null_mut())
+    }
+
+    /// Adjusts the given layout so that blocks allocated from
+    /// one of the quick lists are appropriately sized and
+    /// aligned. Otherwise, returns the original size and
+    /// alignment.
+    fn adjust(layout: Layout) -> (usize, usize) {
+        let size = layout.size();
+        let align = layout.align();
+        if size > MAX_QUICK_SIZE {
+            return (size, align);
+        }
+        let size = usize::max(MIN_ALLOC_SIZE, size.next_power_of_two());
+        let align = usize::max(layout.align(), size);
+        (size, align)
+    }
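+
+    // Worked example (illustrative): a request for 100 bytes with
+    // 8-byte alignment rounds up to a 128-byte block with 128-byte
+    // alignment, served from quick list index 128.ilog2() - 6 == 1,
+    // while a request over MAX_QUICK_SIZE keeps its original layout:
+    //
+    //     assert_eq!(Self::adjust(Layout::from_size_align(100, 8).unwrap()), (128, 128));
+    //     assert_eq!(Self::adjust(Layout::from_size_align(100_000, 8).unwrap()), (100_000, 8));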
+
+    /// Attempts to allocate from an existing list: for requests
+    /// that can be satisfied from one of the quick lists, try
+    /// to do so; otherwise, attempt an allocation from the
+    /// misc list.
+    fn alloc_quick(&mut self, size: usize, align: usize) -> Option<NonNull<u8>> {
+        if size <= MAX_QUICK_SIZE && align == size {
+            let k: usize = size.ilog2() as usize - ALLOC_UNIT_SHIFT;
+            let (node, list) = Self::head(self.qlists[k].take());
+            self.qlists[k] = list;
+            node.map(|header| unsafe { header.as_ref() }.addr)
+        } else {
+            self.alloc_misc(size, align)
+        }
+    }
+
+    /// Allocates a block from the misc list. This is a simple
+    /// first-fit allocator.
+    fn alloc_misc(&mut self, size: usize, align: usize) -> Option<NonNull<u8>> {
+        let (node, list) =
+            Self::unlink(self.misc.take(), |node| size <= node.size && align <= node.align);
+        self.misc = list;
+        node.map(|mut header| {
+            let header = unsafe { header.as_mut() };
+            let k = Self::hash(header.addr.as_ptr());
+            header.next = self.allocated_misc[k].take();
+            self.allocated_misc[k] = NonNull::new(header);
+            header.addr
+        })
+    }
+
+    /// Allocates an aligned block of size `size` from `tail`.
+    /// If `tail` is not already aligned to the given alignment,
+    /// then we try to free blocks larger than or equal in size
+    /// to the minimum allocation unit into the quick lists
+    /// until it is.
+    fn alloc_tail(&mut self, size: usize, align: usize) -> Option<NonNull<u8>> {
+        let (prefix, block) = { self.tail.try_alloc(align, size)? };
+        self.free_prefix(prefix);
+        Some(block.ptr)
+    }
+
+    /// Frees a prefix that came from a tail allocation. This
+    /// attempts to store blocks into the quick lists.
+    fn free_prefix(&mut self, prefix: Block) {
+        let mut prefix = Self::align_prefix(prefix);
+        while let Some(rest) = self.try_free_prefix(prefix) {
+            prefix = rest;
+        }
+    }
+
+    /// Aligns the prefix to the minimum allocation size.
+    fn align_prefix(prefix: Block) -> Block {
+        let ptr = prefix.as_ptr();
+        let len = prefix.len();
+        let offset = ptr.align_offset(MIN_ALLOC_SIZE);
+        assert!(offset <= len);
+        unsafe { Block::new_from_raw_parts(ptr.wrapping_add(offset), len - offset) }
+    }
+
+    /// Tries to free the largest section of the prefix that it
+    /// can, returning the remainder if it did so. Otherwise,
+    /// returns None.
+    fn try_free_prefix(&mut self, prefix: Block) -> Option<Block> {
+        let ptr: *mut u8 = prefix.as_ptr();
+        for k in (0..NUM_QLISTS).rev() {
+            let size = 1 << (k + ALLOC_UNIT_SHIFT);
+            if prefix.len() >= size && ptr.align_offset(size) == 0 {
+                let (_, rest) = prefix.split_at_mut(size)?;
+                self.free(ptr, Layout::from_size_align(size, size).unwrap());
+                return (rest.len() >= MIN_ALLOC_SIZE).then_some(rest);
+            }
+        }
+        None
+    }
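+
+    // Worked example (illustrative): a 320-byte prefix whose base is
+    // 256-byte aligned is carved largest-first by the loop in
+    // `free_prefix`:
+    //
+    //     try_free_prefix(prefix)  // frees 256 bytes to qlists[2], returns the 64-byte rest
+    //     try_free_prefix(rest)    // frees 64 bytes to qlists[0], returns None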
+
+    /// Attempts to reallocate the given block to a new size.
+    ///
+    /// This has a small optimization for the most common case,
+    /// where a block is being realloc'd to grow as data is
+    /// accumulated: it's subtle, but if the original block was
+    /// allocated from one of the quick lists, and the new size
+    /// can be accommodated by the existing allocation, simply
+    /// return the existing block pointer. Otherwise, allocate
+    /// a new block, copy, and free the old block.
+    ///
+    /// Note that the case of a reduction in size might result
+    /// in a new allocation. This is because we rely on the
+    /// accuracy of the `Layout` to find the correct quicklist
+    /// to store the block onto on free. If we reduced below
+    /// the size of the current block, we would lose the layout
+    /// information and potentially leak memory. But this is
+    /// very uncommon.
+    ///
+    /// We make no effort to optimize the case of a `realloc` in
+    /// a `misc` block, as a) it is relatively uncommon to do so
+    /// and b) there may not be a buffer tag for such a block
+    /// yet (one isn't allocated until the block is freed), and
+    /// the implementation would need to be more complex as a
+    /// result.
+    ///
+    /// # Safety
+    /// Must be called with a valid block pointer, layout, and
+    /// size.
+    pub unsafe fn realloc(&mut self, block: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+        if block.is_null() {
+            return self.malloc(layout);
+        }
+        let new_layout = Layout::from_size_align(new_size, layout.align()).expect("layout");
+        let (size, align) = Self::adjust(new_layout);
+        if size == layout.size() && align == layout.align() {
+            return block;
+        }
+        let np = self.malloc(new_layout);
+        if !np.is_null() {
+            unsafe {
+                ptr::copy(block, np, usize::min(layout.size(), new_size));
+            }
+            self.free(block, layout);
+        }
+        np
+    }
+
+    /// Frees a block of memory characterized by the `layout`
+    /// argument. If the block can be freed to one of the
+    /// quick lists, it is; otherwise, it is treated as a misc
+    /// block and freed there.
+    pub fn free(&mut self, block: *mut u8, layout: Layout) {
+        let Some(block) = NonNull::new(block) else {
+            return;
+        };
+        let (size, align) = Self::adjust(layout);
+        if size <= MAX_QUICK_SIZE && align == size {
+            let k: usize = size.ilog2() as usize - ALLOC_UNIT_SHIFT;
+            let header = Header::new(block, size, align, self.qlists[k].take());
+            assert_eq!(block.align_offset(mem::align_of::<Header>()), 0);
+            let p = block.cast::<Header>();
+            unsafe {
+                ptr::write(p.as_ptr(), header);
+            }
+            self.qlists[k] = Some(p);
+        } else {
+            self.free_misc(block, size, align);
+        }
+    }
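+
+    // Example (illustrative, `heap` being an assumed QuickFit value):
+    // resizing within the same quick class returns the same pointer
+    // without copying, per the fast path in `realloc` above.
+    //
+    //     let layout = Layout::from_size_align(128, 128).unwrap();
+    //     let p = heap.malloc(layout);
+    //     let q = unsafe { heap.realloc(p, layout, 120) }; // still a (128, 128) quick block
+    //     assert_eq!(p, q);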
+
+    /// Frees a block to the misc list. This looks up the given
+    /// address in the hash of allocated misc blocks to find its
+    /// header.
+    ///
+    /// If the block header is not found in the hash table, we
+    /// assume that the block was allocated from the tail and
+    /// this is the first time it's been freed, so we allocate a
+    /// header for it and link that into the misc list.
+    ///
+    /// If we cannot allocate a header in the usual way, we take
+    /// it from the block to be freed, which is guaranteed to be
+    /// large enough to hold a header, since anything smaller
+    /// would have been allocated from one of the quick lists,
+    /// and thus freed through that path.
+    fn free_misc(&mut self, mut block: NonNull<u8>, mut size: usize, mut align: usize) {
+        let mut header = self
+            .unlink_allocated_misc(block)
+            .or_else(|| {
+                let hblock = self.malloc(Layout::new::<Header>()).cast::<Header>();
+                let hblock = hblock
+                    .is_null()
+                    .then(|| {
+                        // Header allocation failed: carve the header
+                        // from the front of the block being freed.
+                        let offset = block.align_offset(MIN_ALLOC_SIZE);
+                        let hblock = block.as_ptr().wrapping_add(offset);
+                        let next = hblock.wrapping_add(MIN_ALLOC_SIZE);
+                        block = unsafe { NonNull::new_unchecked(next) };
+                        size -= offset + MIN_ALLOC_SIZE;
+                        align = MIN_ALLOC_SIZE;
+                        hblock.cast()
+                    })
+                    .unwrap_or(hblock);
+                let header = Header::new(block, size, align, None);
+                unsafe {
+                    ptr::write(hblock, header);
+                }
+                NonNull::new(hblock)
+            })
+            .expect("header");
+        let header = unsafe { header.as_mut() };
+        header.next = self.misc.take();
+        self.misc = NonNull::new(header);
+    }
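+
+    // Example (illustrative, `heap` being an assumed QuickFit value
+    // with an empty misc list): a large block round-trips through the
+    // misc list and is reused first-fit.
+    //
+    //     let layout = Layout::from_size_align(32 * 1024, 4096).unwrap();
+    //     let p = heap.malloc(layout); // too big for a quick list: tail
+    //     heap.free(p, layout);        // tagged with a Header, linked onto misc
+    //     let q = heap.malloc(layout); // first fit reuses the same block
+    //     assert_eq!(p, q);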
+
+    /// Unlinks the header for the given address from the hash
+    /// table for allocated misc blocks and returns it, if such
+    /// a header exists. If the block associated with the
+    /// address has not been freed yet, it's possible that no
+    /// header for it exists yet, in which case we return None.
+    fn unlink_allocated_misc(&mut self, block: NonNull<u8>) -> Option<NonNull<Header>> {
+        let k = Self::hash(block.as_ptr());
+        let list = self.allocated_misc[k].take();
+        let (node, list) = Self::unlink(list, |node| node.addr == block);
+        self.allocated_misc[k] = list;
+        node
+    }
+
+    /// Unlinks the first node matching the given predicate from
+    /// the given list, if it exists, returning the node, or
+    /// None, and the list head. The list head will be None if
+    /// the list is empty.
+    fn unlink<F>(
+        mut list: Option<NonNull<Header>>,
+        predicate: F,
+    ) -> (Option<NonNull<Header>>, Option<NonNull<Header>>)
+    where
+        F: Fn(&Header) -> bool,
+    {
+        let mut prev: Option<NonNull<Header>> = None;
+        while let Some(mut node) = list {
+            let node = unsafe { node.as_mut() };
+            if predicate(node) {
+                let next = node.next.take();
+                if let Some(mut prev) = prev {
+                    let prev = unsafe { prev.as_mut() };
+                    prev.next = next;
+                } else {
+                    list = next;
+                }
+                return (NonNull::new(node), list);
+            }
+            prev = NonNull::new(node);
+            list = node.next;
+        }
+        (None, list)
+    }
+
+    /// Splits the list into its first element and tail and
+    /// returns both.
+    fn head(list: Option<NonNull<Header>>) -> (Option<NonNull<Header>>, Option<NonNull<Header>>) {
+        Self::unlink(list, |_| true)
+    }
+
+    /// Hashes a pointer value. This is the bit mixing algorithm
+    /// from Murmur3.
+    fn hash(ptr: *mut u8) -> usize {
+        let mut k = ptr.addr();
+        k ^= k >> 33;
+        k = k.wrapping_mul(0xff51afd7ed558ccd);
+        k ^= k >> 33;
+        k = k.wrapping_mul(0xc4ceb9fe1a85ec53);
+        (k >> 33) % NUM_HASH_BUCKETS
+    }
+}
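+
+// Example (illustrative only): once the global allocator below is
+// installed, ordinary `alloc` types work in no_std kernel code:
+//
+//     use alloc::vec::Vec;
+//
+//     let mut xs = Vec::new();
+//     xs.push(1u64); // served from GLOBAL_ALLOCATOR's 4 MiB heap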
+
+#[cfg(not(test))]
+mod global {
+    use super::{Block, BumpAlloc, QuickFit};
+    use alloc::alloc::{GlobalAlloc, Layout};
+    use core::mem;
+    use core::ptr;
+    use core::sync::atomic::{AtomicPtr, Ordering};
+
+    const GLOBAL_HEAP_SIZE: usize = 4 * 1024 * 1024;
+
+    /// A GlobalHeap is an aligned wrapper around an owned
+    /// buffer.
+    #[repr(C, align(4096))]
+    struct GlobalHeap([u8; GLOBAL_HEAP_SIZE]);
+    impl GlobalHeap {
+        const fn new() -> GlobalHeap {
+            Self([0u8; GLOBAL_HEAP_SIZE])
+        }
+    }
+
+    /// GlobalQuickAlloc is a wrapper around a QuickFit over a
+    /// GlobalHeap that uses interior mutability to implement
+    /// the GlobalAlloc trait.
+    struct GlobalQuickAlloc(AtomicPtr<QuickFit>);
+    impl GlobalQuickAlloc {
+        fn with_allocator<F, R>(&self, thunk: F) -> R
+        where
+            F: FnOnce(&mut QuickFit) -> R,
+        {
+            let a = self.0.swap(ptr::null_mut(), Ordering::Relaxed);
+            assert!(!a.is_null(), "global allocator is nil");
+            let r = thunk(unsafe { &mut *a });
+            self.0.swap(a, Ordering::Relaxed);
+            r
+        }
+    }
+
+    unsafe impl GlobalAlloc for GlobalQuickAlloc {
+        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+            self.with_allocator(|quick| quick.malloc(layout))
+        }
+        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+            self.with_allocator(|quick| quick.free(ptr, layout));
+        }
+        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+            self.with_allocator(|quick| unsafe { quick.realloc(ptr, layout, new_size) })
+        }
+    }
+
+    #[global_allocator]
+    static GLOBAL_ALLOCATOR: GlobalQuickAlloc = GlobalQuickAlloc(AtomicPtr::new({
+        static mut HEAP: GlobalHeap = GlobalHeap::new();
+        static mut ALLOC: QuickFit = QuickFit::new(BumpAlloc::new(unsafe {
+            Block::new_from_raw_parts((&raw mut HEAP).cast(), mem::size_of::<GlobalHeap>())
+        }));
+        &raw mut ALLOC
+    }));
+}
diff --git a/port/src/fdt.rs b/port/src/fdt.rs
index b4a6251..75773e1 100644
--- a/port/src/fdt.rs
+++ b/port/src/fdt.rs
@@ -383,7 +383,7 @@ impl<'a> DeviceTree<'a> {
     }
 
     /// Return the first node matching the compatible string 'comp'
-    pub fn find_compatible(&'a self, comp: &'a str) -> impl Iterator<Item = Node<'a>> + '_ {
+    pub fn find_compatible(&'a self, comp: &'a str) -> impl Iterator<Item = Node<'a>> + 'a {
         // Iterate over all nodes. For each node, iterate over all properties until we find a 'compatible'
         // property. The 'compatible' property contains a list of null terminated strings. If we find a matching
         // string, then return the node, otherwise return None.
diff --git a/port/src/lib.rs b/port/src/lib.rs
index 8120f05..66f324d 100644
--- a/port/src/lib.rs
+++ b/port/src/lib.rs
@@ -1,9 +1,15 @@
 #![allow(clippy::upper_case_acronyms)]
+#![allow(clippy::too_long_first_doc_paragraph)]
 #![cfg_attr(not(any(test)), no_std)]
+#![feature(allocator_api)]
 #![feature(maybe_uninit_slice)]
 #![feature(step_trait)]
+#![feature(strict_provenance)]
 #![forbid(unsafe_op_in_unsafe_fn)]
 
+extern crate alloc;
+
+pub mod allocator;
 pub mod bitmapalloc;
 pub mod dat;
 pub mod devcons;
diff --git a/riscv64/src/main.rs b/riscv64/src/main.rs
index 19f1621..8551859 100644
--- a/riscv64/src/main.rs
+++ b/riscv64/src/main.rs
@@ -1,6 +1,4 @@
 #![feature(alloc_error_handler)]
-#![feature(asm_const)]
-#![feature(panic_info_message)]
 #![cfg_attr(not(any(test)), no_std)]
 #![cfg_attr(not(test), no_main)]
 #![allow(clippy::upper_case_acronyms)]
diff --git a/riscv64/src/runtime.rs b/riscv64/src/runtime.rs
index 78eef68..a901120 100644
--- a/riscv64/src/runtime.rs
+++ b/riscv64/src/runtime.rs
@@ -2,15 +2,12 @@
 
 extern crate alloc;
 
-use alloc::alloc::{GlobalAlloc, Layout};
+use alloc::alloc::Layout;
 use core::arch::asm;
 use core::panic::PanicInfo;
 use port::{print, println};
 
-// ///////////////////////////////////
-// / LANGUAGE STRUCTURES / FUNCTIONS
-// ///////////////////////////////////
 #[no_mangle]
 extern "C" fn eh_personality() {}
 
@@ -18,7 +15,7 @@ extern "C" fn eh_personality() {}
 fn panic(info: &PanicInfo) -> ! {
     print!("Panic: ");
     if let Some(p) = info.location() {
-        println!("line {}, file {}: {}", p.line(), p.file(), info.message().unwrap());
+        println!("line {}, file {}: {}", p.line(), p.file(), info.message());
     } else {
         println!("no information available.");
     }
@@ -37,17 +34,3 @@ extern "C" fn abort() -> ! {
 fn oom(_layout: Layout) -> ! {
     panic!("oom");
 }
-
-struct FakeAlloc;
-
-unsafe impl GlobalAlloc for FakeAlloc {
-    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
-        panic!("fake alloc");
-    }
-    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
-        panic!("fake dealloc");
-    }
-}
-
-#[global_allocator]
-static FAKE_ALLOCATOR: FakeAlloc = FakeAlloc {};
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index edc3dd8..849533d 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,5 +1,5 @@
 [toolchain]
-channel = "nightly-2024-06-08"
+channel = "nightly-2024-09-27"
 components = [ "rustfmt", "rust-src", "clippy", "llvm-tools" ]
 targets = [ "aarch64-unknown-none",
diff --git a/x86_64/src/main.rs b/x86_64/src/main.rs
index cd85932..ebd4a42 100644
--- a/x86_64/src/main.rs
+++ b/x86_64/src/main.rs
@@ -1,5 +1,4 @@
 #![feature(alloc_error_handler)]
-#![feature(asm_const)]
 #![feature(naked_functions)]
 #![feature(sync_unsafe_cell)]
 #![cfg_attr(not(any(test)), no_std)]
diff --git a/x86_64/src/runtime.rs b/x86_64/src/runtime.rs
index 6535d47..8dd50b8 100644
--- a/x86_64/src/runtime.rs
+++ b/x86_64/src/runtime.rs
@@ -2,7 +2,7 @@
 
 extern crate alloc;
 
-use alloc::alloc::{GlobalAlloc, Layout};
+use alloc::alloc::Layout;
 use core::panic::PanicInfo;
 
 #[panic_handler]
@@ -15,17 +15,3 @@ pub fn panic(_info: &PanicInfo) -> ! {
 fn oom(_layout: Layout) -> ! {
     panic!("oom");
 }
-
-struct FakeAlloc;
-
-unsafe impl GlobalAlloc for FakeAlloc {
-    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
-        panic!("fake alloc");
-    }
-    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
-        panic!("fake dealloc");
-    }
-}
-
-#[global_allocator]
-static FAKE_ALLOCATOR: FakeAlloc = FakeAlloc {};
diff --git a/xtask/src/main.rs b/xtask/src/main.rs
index 03d4afa..e03df4a 100644
--- a/xtask/src/main.rs
+++ b/xtask/src/main.rs
@@ -325,6 +325,7 @@ impl BuildStep {
         if self.profile == Profile::Release {
             cmd.arg("--release");
         }
+        cmd.arg("-Z").arg("build-std=core,alloc");
         if self.verbose {
             println!("Executing {cmd:?}");
         }
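
Note: the custom JSON targets ship no prebuilt core/alloc, so the xtask
change above makes every kernel build compile them from source, roughly
equivalent to invoking by hand:

    cargo build -Z build-std=core,alloc --target lib/aarch64-unknown-none-elf.json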