From 5ae633903d09d226ae663183cf2a336efe785065 Mon Sep 17 00:00:00 2001
From: Julian Frimmel
Date: Sat, 20 Aug 2022 12:34:06 +0200
Subject: [PATCH] Only split large enough blocks on allocations

Previously the code always tried to split a block on allocation and
only failed to do so when the end of the buffer was reached. Crucially,
a block was therefore always split when it was in the middle of the
buffer, which is the common case for allocations that are reused. Every
reused allocation was thus split into a USED block and a following FREE
block.

But what happens if the block chosen for this split is not large enough
for a USED block plus a FREE block (even one with a 0-sized payload)?
Then the split clearly must not happen. The current code, however,
splits regardless and overwrites the following USED block with the
header of the new "free" block. This corrupts the allocator state:
random parts of heap memory, even uninitialized memory, might then be
read or written.

This issue was hidden by an equally severe bug: `entry_size - n -
HEADER_SIZE` subtracts with overflow in such cases, which panics in
debug mode (panicking is [forbidden] by the contract of
[`GlobalAlloc`][globalalloc]) and produces a huge number in release
mode. Both render the allocator unusable, as it either violates its
safety contract or ends up in an invalid state.

This commit fixes the bug by checking whether the block is large enough
before splitting it. A test case that fails without the change is added
as well.

[forbidden]: https://doc.rust-lang.org/1.63.0/core/alloc/trait.GlobalAlloc.html#safety
[globalalloc]: https://doc.rust-lang.org/1.63.0/core/alloc/trait.GlobalAlloc.html
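To make the failure mode concrete, here is a minimal, self-contained
sketch of the arithmetic. The 4-byte `HEADER_SIZE` is an assumption
derived from the 32-byte buffer in the new test, and `split_remainder`
is a hypothetical helper for illustration, not a function from this
crate:

    // Assumed header size (hypothetical; the real constant lives in
    // src/raw_allocator).
    const HEADER_SIZE: usize = 4;

    /// Size of the FREE block remaining after carving `n` bytes out of a
    /// block of `entry_size` bytes, or `None` if no FREE block (header
    /// plus a possibly 0-sized payload) fits anymore.
    fn split_remainder(entry_size: usize, n: usize) -> Option<usize> {
        // The old code computed `entry_size - n - HEADER_SIZE`
        // unconditionally: for entry_size = 12 and n = 12 this is
        // `12 - 12 - 4`, which underflows `usize` (panic in debug mode,
        // wrap-around to a huge number in release mode). The guard below
        // ensures the subtraction only happens when it cannot underflow.
        (entry_size - n > HEADER_SIZE).then(|| entry_size - n - HEADER_SIZE)
    }

    fn main() {
        assert_eq!(split_remainder(12, 4), Some(4)); // split is possible
        assert_eq!(split_remainder(12, 12), None);   // must not split
    }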
---
 src/raw_allocator/mod.rs | 36 ++++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/src/raw_allocator/mod.rs b/src/raw_allocator/mod.rs
index 003c959..d42cd1d 100644
--- a/src/raw_allocator/mod.rs
+++ b/src/raw_allocator/mod.rs
@@ -67,8 +67,10 @@ impl RawAllocator {
         // if the found block is large enough, split it into a used and a free
         let entry_size = self.buffer[offset].size();
         self.buffer[offset] = Entry::used(n);
-        if let Some(following) = self.buffer.following_entry(offset) {
-            following.write(Entry::free(entry_size - n - HEADER_SIZE));
+        if entry_size - n > HEADER_SIZE {
+            if let Some(following) = self.buffer.following_entry(offset) {
+                following.write(Entry::free(entry_size - n - HEADER_SIZE));
+            }
         }
         Some(self.buffer.memory_of_mut(offset))
     }
@@ -307,4 +309,34 @@ mod tests {
         // therefore there must be two free blocks
         assert_allocations!(allocator, Entry::free(4), Entry::free(4));
     }
+
+    #[test]
+    fn alloc_impossible_splitting() {
+        let mut allocator = RawAllocator::<32>::new();
+        let _ptr1 = address!(allocator.alloc(4).unwrap());
+        let ptr2 = address!(allocator.alloc(12).unwrap());
+        let _ptr3 = address!(allocator.alloc(4).unwrap());
+        allocator.free(ptr2).unwrap();
+        assert_allocations!(allocator, Entry::used(4), Entry::free(12), Entry::used(4));
+
+        // now we've set up the heap such that there is a free block of 12 in
+        // the middle (and no free data at the end). If one acquires a block
+        // of size 4, everything should work fine and the free block is split.
+        let ptr4 = address!(allocator.alloc(4).unwrap());
+        assert_allocations!(
+            allocator,
+            Entry::used(4),
+            Entry::used(4),
+            Entry::free(4),
+            Entry::used(4)
+        );
+        allocator.free(ptr4).unwrap();
+        assert_allocations!(allocator, Entry::used(4), Entry::free(12), Entry::used(4));
+
+        // now the previous state is restored. If there is an allocation for a
+        // size of 12, no splitting must happen, since the block is only 12
+        // bytes in size, so splitting would tamper with the following block.
+        let _ptr5 = address!(allocator.alloc(12).unwrap());
+        assert_allocations!(allocator, Entry::used(4), Entry::used(12), Entry::used(4));
+    }
 }