Kernel: Don't immediately commit pages for AnonymousVMObjects
We actually save a lot more memory doing this than I thought we would.
byteduck committed Mar 5, 2024
1 parent 229ff2d commit a7de5c0
Showing 16 changed files with 158 additions and 98 deletions.
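The core of the change: an AnonymousVMObject now starts with every page slot empty and only commits a physical page the first time a fault touches it. Below is a standalone C++ sketch of that pattern (not duckOS code; the names and use of std::vector are illustrative only):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    using PageIndex = size_t; // slot value 0 means "no physical page committed yet"

    struct LazyAnonObject {
        std::vector<PageIndex> pages; // one slot per virtual page in the object
        size_t committed = 0;         // physical pages actually backing the object

        explicit LazyAnonObject(size_t n_pages) : pages(n_pages, 0) {}

        // Returns true if a page was newly committed, false if it already existed.
        bool try_fault_in_page(size_t idx, PageIndex fresh_frame) {
            if (idx >= pages.size())
                return false;         // out of range; the kernel returns EINVAL here
            if (pages[idx])
                return false;         // already committed by an earlier fault
            pages[idx] = fresh_frame; // commit on first touch
            committed++;
            return true;
        }
    };

    int main() {
        LazyAnonObject obj(1024);     // a 1024-page object, zero frames used so far
        obj.try_fault_in_page(3, 42); // first access to page 3 commits one frame
        printf("committed: %zu of %zu\n", obj.committed, obj.pages.size()); // 1 of 1024
    }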
39 changes: 26 additions & 13 deletions kernel/interrupt/isr.cpp
@@ -117,24 +117,37 @@ namespace Interrupt {
             break;

         case 14: //Page fault
-            if(!TaskManager::current_thread() || TaskManager::current_thread()->is_kernel_mode() || TaskManager::is_preempting()) {
-                MemoryManager::inst().page_fault_handler(regs);
-            } else {
-                size_t err_pos;
-                asm volatile ("mov %%cr2, %0" : "=r" (err_pos));
-                PageFault::Type type;
-                if(regs->err_code == FAULT_USER_READ)
+        {
+            size_t err_pos;
+            asm volatile ("mov %%cr2, %0" : "=r" (err_pos));
+            PageFault::Type type;
+            switch (regs->err_code) {
+                case FAULT_USER_READ:
+                case FAULT_USER_READ_GPF:
+                case FAULT_KERNEL_READ:
+                case FAULT_KERNEL_READ_GPF:
                     type = PageFault::Type::Read;
-                else if(regs->err_code == FAULT_USER_WRITE)
+                    break;
+                case FAULT_USER_WRITE:
+                case FAULT_USER_WRITE_GPF:
+                case FAULT_KERNEL_WRITE:
+                case FAULT_KERNEL_WRITE_GPF:
                     type = PageFault::Type::Write;
-                else
+                    break;
+                default:
                     type = PageFault::Type::Unknown;
-                TaskManager::current_thread()->handle_pagefault({
-                    err_pos,
-                    regs,
-                });
             }
+            const PageFault fault { err_pos, regs, type };
+            if(TaskManager::is_preempting() || fault.type == PageFault::Type::Unknown) {
+                // Never want to fault in the kernel or while preempting
+                MemoryManager::inst().page_fault_handler(regs);
+            } else if (err_pos >= HIGHER_HALF) {
+                MM.kernel_space()->try_pagefault(fault);
+            } else {
+                TaskManager::current_thread()->handle_pagefault(fault);
+            }
             break;
+        }

         default:
             handle_fault("UNKNOWN_FAULT", "What did you do?", SIGILL, regs);
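For context on the err_code switch above: on x86, the CPU pushes a page-fault error code whose low bits describe the access — bit 0 is set for protection violations (clear for non-present pages), bit 1 for writes, and bit 2 for user-mode accesses. The FAULT_* constants are duckOS symbols that presumably enumerate these combinations; the new dispatch also routes faults at addresses above HIGHER_HALF to the kernel's own VMSpace, which is what lets lazily committed kernel objects fault their pages in. A hedged sketch of classifying read vs. write straight from the bits:

    #include <cstdint>

    enum class FaultType { Read, Write, Unknown };

    // Assumption: err_code is the raw x86 page-fault error code pushed by the CPU.
    FaultType classify(uint32_t err_code) {
        constexpr uint32_t WRITE_BIT = 1u << 1; // set when the faulting access was a write
        constexpr uint32_t RSVD_BIT  = 1u << 3; // reserved-bit violation: treat as unknown
        if (err_code & RSVD_BIT)
            return FaultType::Unknown;
        return (err_code & WRITE_BIT) ? FaultType::Write : FaultType::Read;
    }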
33 changes: 30 additions & 3 deletions kernel/memory/AnonymousVMObject.cpp
@@ -3,7 +3,6 @@

 #include "AnonymousVMObject.h"
 #include "MemoryManager.h"
-#include "../kstd/cstring.h"

 Mutex AnonymousVMObject::s_shared_lock {"AnonymousVMObject::Shared"};
 int AnonymousVMObject::s_cur_shm_id = 1;
@@ -16,8 +15,14 @@ AnonymousVMObject::~AnonymousVMObject() {
     }
 }

-ResultRet<kstd::Arc<AnonymousVMObject>> AnonymousVMObject::alloc(size_t size, kstd::string name) {
+ResultRet<kstd::Arc<AnonymousVMObject>> AnonymousVMObject::alloc(size_t size, kstd::string name, bool commit) {
     size_t num_pages = kstd::ceil_div(size, PAGE_SIZE);
+
+    // If we requested uncommitted pages, don't allocate any physical pages yet.
+    if (!commit)
+        return kstd::Arc<AnonymousVMObject>(new AnonymousVMObject(name, num_pages, false));
+
+    // If we requested committed pages, alloc them now.
     auto pages = TRY(MemoryManager::inst().alloc_physical_pages(num_pages));
     auto object = kstd::Arc<AnonymousVMObject>(new AnonymousVMObject(name, pages, false));
     auto tmp_mapped = MM.map_object(object);
@@ -52,6 +57,7 @@ ResultRet<kstd::Arc<AnonymousVMObject>> AnonymousVMObject::map_to_physical(Physi
     }

     auto object = new AnonymousVMObject("Physical Mapping", kstd::move(pages), false);
+    object->m_num_committed_pages = 0; // Hack - but we don't want this counting towards our memory total.
     object->m_fork_action = ForkAction::Share;
     return kstd::Arc<AnonymousVMObject>(object);
 }
@@ -97,5 +103,26 @@ ResultRet<kstd::Arc<VMObject>> AnonymousVMObject::clone() {
     return kstd::static_pointer_cast<VMObject>(new_object);
 }

+ResultRet<bool> AnonymousVMObject::try_fault_in_page(PageIndex page) {
+    LOCK(m_page_lock);
+    if (page >= m_physical_pages.size())
+        return Result(EINVAL);
+    if (m_physical_pages[page])
+        return false;
+    m_physical_pages[page] = TRY(MM.alloc_physical_page());
+    m_num_committed_pages++;
+    MM.with_quickmapped(m_physical_pages[page], [](void* pagemem) {
+        memset(pagemem, 0, PAGE_SIZE);
+    });
+    return true;
+}
+
+size_t AnonymousVMObject::num_committed_pages() const {
+    return m_num_committed_pages;
+}
+
 AnonymousVMObject::AnonymousVMObject(kstd::string name, kstd::vector<PageIndex> physical_pages, bool cow):
-    VMObject(kstd::move(name), kstd::move(physical_pages), cow) {}
+    VMObject(kstd::move(name), kstd::move(physical_pages), cow), m_num_committed_pages(m_physical_pages.size()) {}
+
+AnonymousVMObject::AnonymousVMObject(kstd::string name, size_t n_pages, bool cow):
+    VMObject(kstd::move(name), kstd::vector(n_pages, (PageIndex) 0), cow) {}
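A note on the memset in try_fault_in_page: physical frames are recycled, so a newly committed anonymous page must be zeroed before anything reads it — both because fresh anonymous memory is expected to read as zero and to avoid leaking stale data from a previous owner. The frame has no permanent kernel mapping, hence the temporary with_quickmapped mapping. A standalone illustration (4 KiB page size assumed):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    constexpr size_t PAGE_SIZE = 4096; // assumption: 4 KiB pages, as on x86

    // Stand-in for the quickmap callback body: clear a recycled frame before
    // handing it to the faulting process.
    void scrub(void* pagemem) {
        memset(pagemem, 0, PAGE_SIZE);
    }

    int main() {
        alignas(4096) static unsigned char frame[PAGE_SIZE];
        memset(frame, 0xAA, PAGE_SIZE); // simulate stale data from a previous owner
        scrub(frame);
        assert(frame[0] == 0 && frame[PAGE_SIZE - 1] == 0);
    }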
8 changes: 7 additions & 1 deletion kernel/memory/AnonymousVMObject.h
@@ -18,9 +18,11 @@ class AnonymousVMObject: public VMObject {
     /**
      * Allocates a new anonymous VMObject.
      * @param size The minimum size, in bytes, of the object.
+     * @param name The name of the object.
+     * @param commit Whether to immediately commit physical pages.
      * @return The newly allocated object, if successful.
      */
-    static ResultRet<kstd::Arc<AnonymousVMObject>> alloc(size_t size, kstd::string name = "anonymous");
+    static ResultRet<kstd::Arc<AnonymousVMObject>> alloc(size_t size, kstd::string name = "anonymous", bool commit = false);

    /**
     * Allocates a new anonymous VMObject backed by contiguous physical pages.
@@ -72,12 +74,15 @@ class AnonymousVMObject: public VMObject {
     bool is_anonymous() const override { return true; }
     ForkAction fork_action() const override { return m_fork_action; }
     ResultRet<kstd::Arc<VMObject>> clone() override;
+    ResultRet<bool> try_fault_in_page(PageIndex page) override;
+    size_t num_committed_pages() const override;


 private:
     friend class MemoryManager;

     explicit AnonymousVMObject(kstd::string name, kstd::vector<PageIndex> physical_pages, bool cow);
+    explicit AnonymousVMObject(kstd::string name, size_t n_pages, bool cow);

     static Mutex s_shared_lock;
     static int s_cur_shm_id;
@@ -88,4 +93,5 @@ class AnonymousVMObject: public VMObject {
     ForkAction m_fork_action = ForkAction::BecomeCoW;
     pid_t m_shared_owner;
     int m_shm_id = 0;
+    size_t m_num_committed_pages = 0;
 };
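Note the defaulted commit = false: every existing alloc() call site silently becomes lazy, and only the paths that genuinely need resident memory (kernel regions and stacks, below) pass an explicit true. A minimal sketch of that migration pattern, with a hypothetical free function standing in for AnonymousVMObject::alloc():

    #include <cstddef>
    #include <cstdio>
    #include <string>

    static void alloc(size_t size, const std::string& name = "anonymous",
                      bool commit = false) {
        printf("%-12s %8zu bytes, %s\n", name.c_str(), size,
               commit ? "committed up front" : "committed on first fault");
    }

    int main() {
        alloc(1 << 20, "user_heap");          // pre-existing call site: now lazy
        alloc(1 << 14, "kernel_stack", true); // kernel stack must stay resident
    }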
3 changes: 2 additions & 1 deletion kernel/memory/InodeVMObject.cpp
@@ -25,7 +25,8 @@ InodeVMObject::InodeVMObject(kstd::string name, kstd::vector<PageIndex> physical
     m_type(type)
 {}

-ResultRet<bool> InodeVMObject::read_page_if_needed(size_t index) {
+ResultRet<bool> InodeVMObject::try_fault_in_page(size_t index) {
+    LOCK(m_page_lock);
     if(index >= m_physical_pages.size())
         return Result(ERANGE);
     if(m_physical_pages[index])
8 changes: 1 addition & 7 deletions kernel/memory/InodeVMObject.h
@@ -14,20 +14,14 @@ class InodeVMObject: public VMObject {

     static kstd::Arc<InodeVMObject> make_for_inode(kstd::string name, kstd::Arc<Inode> inode, Type type);

-
-    PageIndex& physical_page_index(size_t index) const {
-        return m_physical_pages[index];
-    };
-
     /**
      * Reads in the page at the given index if it isn't allocated yet.
      * @param index The index of the page to read in.
      * @return A successful result if the index is in range and could be read. True if read, false if already exists.
      */
-    ResultRet<bool> read_page_if_needed(size_t index);
+    ResultRet<bool> try_fault_in_page(PageIndex index) override;

     kstd::Arc<Inode> inode() const { return m_inode; }
-    Mutex& lock() { return m_page_lock; }
     Type type() const { return m_type; }
     bool is_inode() const override { return true; }
     ForkAction fork_action() const override {
4 changes: 2 additions & 2 deletions kernel/memory/Memory.h
@@ -22,8 +22,8 @@
 #define KERNEL_DATA_SIZE (KERNEL_DATA_END - KERNEL_DATA)
 #define KERNEL_END_VIRTADDR (HIGHER_HALF + KERNEL_SIZE_PAGES * PAGE_SIZE)
 #define KERNEL_VIRTUAL_HEAP_BEGIN 0xE0000000
-#define KERNEL_QUICKMAP_PAGE_A (KERNEL_VIRTUAL_HEAP_BEGIN - PAGE_SIZE)
-#define KERNEL_QUICKMAP_PAGE_B (KERNEL_VIRTUAL_HEAP_BEGIN - (PAGE_SIZE * 2))
+#define MAX_QUICKMAP_PAGES 8
+#define KERNEL_QUICKMAP_PAGES (KERNEL_VIRTUAL_HEAP_BEGIN - (PAGE_SIZE * MAX_QUICKMAP_PAGES))

 // For disambiguating parameter meanings.
 typedef size_t PageIndex;
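The two fixed quickmap slots become a window of MAX_QUICKMAP_PAGES slots just below the kernel's virtual heap. With the values above and 4 KiB pages (an assumption; PAGE_SIZE isn't shown in this hunk), the slots land at 0xDFFF8000 through 0xDFFFF000:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t PAGE_SIZE = 4096; // assumption: 4 KiB x86 pages
    constexpr uint32_t KERNEL_VIRTUAL_HEAP_BEGIN = 0xE0000000;
    constexpr uint32_t MAX_QUICKMAP_PAGES = 8;
    constexpr uint32_t KERNEL_QUICKMAP_PAGES =
            KERNEL_VIRTUAL_HEAP_BEGIN - (PAGE_SIZE * MAX_QUICKMAP_PAGES);

    int main() {
        for (uint32_t i = 0; i < MAX_QUICKMAP_PAGES; i++)
            printf("quickmap slot %u -> %#010x\n", (unsigned) i,
                   (unsigned) (KERNEL_QUICKMAP_PAGES + i * PAGE_SIZE));
        // slot 0 -> 0xdfff8000 ... slot 7 -> 0xdffff000
    }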
15 changes: 13 additions & 2 deletions kernel/memory/MemoryManager.cpp
@@ -329,7 +329,7 @@ ResultRet<kstd::vector<PageIndex>> MemoryManager::alloc_contiguous_physical_page

 kstd::Arc<VMRegion> MemoryManager::alloc_kernel_region(size_t size) {
     auto do_alloc = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
-        auto object = TRY(AnonymousVMObject::alloc(size, "kernel"));
+        auto object = TRY(AnonymousVMObject::alloc(size, "kernel", true));
         return TRY(m_kernel_space->map_object(object, VMProt::RW));
     };
     auto res = do_alloc();
@@ -340,7 +340,7 @@ kstd::Arc<VMRegion> MemoryManager::alloc_kernel_region(size_t size) {

 kstd::Arc<VMRegion> MemoryManager::alloc_kernel_stack_region(size_t size) {
     auto do_alloc = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
-        auto object = TRY(AnonymousVMObject::alloc(size, "kernel_stack"));
+        auto object = TRY(AnonymousVMObject::alloc(size, "kernel_stack", true));
         return TRY(m_kernel_space->map_object_with_sentinel(object, VMProt::RW));
     };
     auto res = do_alloc();
@@ -360,6 +360,17 @@ kstd::Arc<VMRegion> MemoryManager::alloc_dma_region(size_t size) {
     return res.value();
 }

+kstd::Arc<VMRegion> MemoryManager::alloc_contiguous_kernel_region(size_t size) {
+    auto do_alloc = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
+        auto object = TRY(AnonymousVMObject::alloc_contiguous(size, "kernel_contig"));
+        return TRY(m_kernel_space->map_object(object, VMProt::RW));
+    };
+    auto res = do_alloc();
+    if(res.is_error())
+        PANIC("ALLOC_CONTIG_REGION_FAIL", "Could not allocate a new contiguous anonymous memory region.");
+    return res.value();
+}
+
 kstd::Arc<VMRegion> MemoryManager::alloc_mapped_region(PhysicalAddress start, size_t size) {
     auto do_map = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
         auto object = TRY(AnonymousVMObject::map_to_physical(start, size));
60 changes: 42 additions & 18 deletions kernel/memory/MemoryManager.h
@@ -132,6 +132,12 @@ class MemoryManager {
      */
     kstd::Arc<VMRegion> alloc_dma_region(size_t size);

+    /**
+     * Allocates a new contiguous anonymous region in kernel space.
+     * @param size The minimum size, in bytes, of the new region.
+     */
+    kstd::Arc<VMRegion> alloc_contiguous_kernel_region(size_t size);
+
     /**
      * Allocates a new virtual region in kernel space that is mapped to an existing range of physical pages.
      * @param start The start physical address to map to. Will be rounded down to a page boundary.
@@ -154,13 +160,19 @@ class MemoryManager {
      */
     template<typename F>
     void with_quickmapped(PageIndex page, F&& callback) {
-        LOCK(m_quickmap_lock);
-        ASSERT(!m_is_quickmapping);
-        m_is_quickmapping = true;
-        kernel_page_directory.map_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE, page, VMProt::RW);
-        callback((void*) KERNEL_QUICKMAP_PAGE_A);
-        kernel_page_directory.unmap_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE);
-        m_is_quickmapping = false;
+        size_t page_idx = -1;
+        for (int i = 0; i < MAX_QUICKMAP_PAGES; i++) {
+            bool expected = false;
+            if (m_quickmap_page[i].compare_exchange_strong(expected, true, MemoryOrder::Acquire)) {
+                page_idx = i;
+                break;
+            }
+        }
+        ASSERT(page_idx != -1);
+        kernel_page_directory.map_page((KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx, page, VMProt::RW);
+        callback((void*) (KERNEL_QUICKMAP_PAGES + page_idx * PAGE_SIZE));
+        kernel_page_directory.unmap_page((KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx);
+        m_quickmap_page[page_idx].store(false, MemoryOrder::Release);
     }

     /**
@@ -171,15 +183,28 @@ class MemoryManager {
      */
     template<typename F>
     void with_dual_quickmapped(PageIndex page_a, PageIndex page_b, F&& callback) {
-        LOCK(m_quickmap_lock);
-        ASSERT(!m_is_quickmapping);
-        m_is_quickmapping = true;
-        kernel_page_directory.map_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE, page_a, VMProt::RW);
-        kernel_page_directory.map_page(KERNEL_QUICKMAP_PAGE_B / PAGE_SIZE, page_b, VMProt::RW);
-        callback((void*) KERNEL_QUICKMAP_PAGE_A, (void*) KERNEL_QUICKMAP_PAGE_B);
-        kernel_page_directory.unmap_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE);
-        kernel_page_directory.unmap_page(KERNEL_QUICKMAP_PAGE_B / PAGE_SIZE);
-        m_is_quickmapping = false;
+        size_t page_idx_a = -1, page_idx_b = -1;
+        for (int i = 0; i < MAX_QUICKMAP_PAGES; i++) {
+            bool expected = false;
+            if (m_quickmap_page[i].compare_exchange_strong(expected, true, MemoryOrder::Acquire)) {
+                if (page_idx_a == -1) {
+                    page_idx_a = i;
+                } else {
+                    page_idx_b = i;
+                    break;
+                }
+            }
+        }
+        ASSERT((page_idx_a != -1) && (page_idx_b != -1));
+        auto page_a_idx = (KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx_a;
+        auto page_b_idx = (KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx_b;
+        kernel_page_directory.map_page(page_a_idx, page_a, VMProt::RW);
+        kernel_page_directory.map_page(page_b_idx, page_b, VMProt::RW);
+        callback((void*) (page_a_idx * PAGE_SIZE), (void*) (page_b_idx * PAGE_SIZE));
+        kernel_page_directory.unmap_page(page_a_idx);
+        kernel_page_directory.unmap_page(page_b_idx);
+        m_quickmap_page[page_idx_a].store(false, MemoryOrder::Release);
+        m_quickmap_page[page_idx_b].store(false, MemoryOrder::Release);
     }

     /** Copies the contents of one physical page to another. **/
@@ -242,8 +267,7 @@ class MemoryManager {
     kstd::Arc<VMSpace> m_kernel_space;
     kstd::Arc<VMSpace> m_heap_space;

-    Mutex m_quickmap_lock {"quickmap"};
-    bool m_is_quickmapping = false;
+    Atomic<bool> m_quickmap_page[MAX_QUICKMAP_PAGES] {};
 };

 void liballoc_lock();
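The quickmap rework replaces a single global mutex (which asserted against reentrancy) with a lock-free pool: each slot is an atomic flag, a thread claims the first flag it can flip false→true with a compare-exchange-acquire, and releases it with a store-release. Below is a standalone model using std::atomic in place of duckOS's Atomic/MemoryOrder types:

    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t MAX_QUICKMAP_PAGES = 8;
    std::atomic<bool> quickmap_busy[MAX_QUICKMAP_PAGES];

    template<typename F>
    void with_slot(F&& callback) {
        size_t idx = SIZE_MAX;
        for (size_t i = 0; i < MAX_QUICKMAP_PAGES; i++) {
            bool expected = false;
            // Acquire: work inside the slot can't be reordered before the claim.
            if (quickmap_busy[i].compare_exchange_strong(expected, true,
                                                         std::memory_order_acquire)) {
                idx = i;
                break;
            }
        }
        assert(idx != SIZE_MAX); // more concurrent users than slots would be a bug
        callback(idx);           // the slot is exclusively ours in here
        // Release: the slot's effects are visible before the next claimant sees "free".
        quickmap_busy[idx].store(false, std::memory_order_release);
    }

    int main() {
        with_slot([](size_t slot) { (void) slot; /* map, use, unmap slot here */ });
    }

One design consequence: up to MAX_QUICKMAP_PAGES threads can quickmap concurrently instead of serializing on one mutex, which matters now that every first-touch page fault zeroes its frame through a quickmap slot.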
4 changes: 2 additions & 2 deletions kernel/memory/PageDirectory.cpp
@@ -112,7 +112,7 @@ PageDirectory::PageDirectory(PageDirectory::DirectoryType type):
     m_type(type)
 {
     if(type == DirectoryType::USER) {
-        m_entries_region = MemoryManager::inst().alloc_kernel_region(sizeof(Entry) * 1024);
+        m_entries_region = MemoryManager::inst().alloc_contiguous_kernel_region(sizeof(Entry) * 1024);
         m_entries = (Entry*) m_entries_region->start();
         // Map the kernel into the directory
         for(auto i = 768; i < 1024; i++) {
@@ -313,7 +313,7 @@ Result PageDirectory::map_page(PageIndex vpage, PageIndex ppage, VMProt prot) {

     entry->data.present = true;
     entry->data.read_write = prot.write;
-    entry->data.user = true;
+    entry->data.user = directory_index < 768;
     entry->data.set_address(ppage * PAGE_SIZE);
     MemoryManager::inst().invlpg((void *) (vpage * PAGE_SIZE));

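The entry->data.user change above matters once kernel pages can be mapped on demand through the common map_page path: with the kernel living in the top gigabyte of a 32-bit address space, each page-directory entry covers 4 MiB, and entries 768 and up (0xC0000000 and above, matching the 768..1024 kernel-mapping loop in the constructor) belong to the kernel, so only entries below 768 should ever be user-accessible. A small sketch of the arithmetic:

    #include <cstdint>

    // 768 * 4 MiB == 0xC0000000, the usual 32-bit higher-half split.
    constexpr uint32_t FIRST_KERNEL_DIR_ENTRY = 768;

    bool user_accessible(uint32_t vaddr) {
        uint32_t directory_index = vaddr >> 22; // top 10 bits select the PDE
        return directory_index < FIRST_KERNEL_DIR_ENTRY;
    }

    static_assert((768u << 22) == 0xC0000000, "split sits at 3 GiB");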
2 changes: 1 addition & 1 deletion kernel/memory/PageTable.cpp
@@ -33,7 +33,7 @@ PageTable::PageTable(size_t vaddr, bool alloc_table):
     _vaddr(vaddr)
 {
     if(alloc_table) {
-        m_entries_region = MM.alloc_kernel_region(4096);
+        m_entries_region = MM.alloc_contiguous_kernel_region(4096);
         _entries = (Entry*) m_entries_region->start();
     }
 }
12 changes: 12 additions & 0 deletions kernel/memory/VMObject.h
@@ -37,12 +37,24 @@ class VMObject: public kstd::ArcSelf<VMObject> {
     /** What the object should do when a memory space containing it is forked. **/
     virtual ForkAction fork_action() const { return ForkAction::Share; }

+    /** Lock for manipulating the object. **/
+    Mutex& lock() { return m_page_lock; }
+
+    PageIndex& physical_page_index(size_t index) const {
+        return m_physical_pages[index];
+    };
+
     /** Tries to copy the page at a given index if it is marked CoW. If it is not, EINVAL is returned. **/
     Result try_cow_page(PageIndex page);
     /** Returns whether a page in the object is marked CoW. **/
     bool page_is_cow(PageIndex page) const { return m_cow_pages.get(page); };
     /** Clones this VMObject using all the same physical pages and properties. **/
     virtual ResultRet<kstd::Arc<VMObject>> clone();
+    /** Try to fault in an unmapped page in the object.
+     * @return Returns true if a new page was mapped in, false if already mapped. **/
+    virtual ResultRet<bool> try_fault_in_page(PageIndex page) { return Result(EINVAL); };
+    /** The number of committed pages this VMObject is responsible for (i.e. memory usage) **/
+    virtual size_t num_committed_pages() const { return 0; };

 protected:
     /** Marks every page in this object as CoW, and increases the reference count of all pages by 1. **/
