From a7de5c0204be26ea2c57f8e75bf3ac1699512383 Mon Sep 17 00:00:00 2001
From: Aaron <10217842+byteduck@users.noreply.github.com>
Date: Mon, 4 Mar 2024 22:05:55 -0800
Subject: [PATCH] Kernel: Don't immediately commit pages for AnonymousVMObjects

Anonymous pages are now committed lazily: a physical page is only
allocated and zero-filled the first time it is actually touched, rather
than when the object is created. We actually save a lot more memory
doing this than I thought we would.
---
 kernel/interrupt/isr.cpp            | 39 ++++++++++++-------
 kernel/memory/AnonymousVMObject.cpp | 33 ++++++++++++++--
 kernel/memory/AnonymousVMObject.h   |  8 +++-
 kernel/memory/InodeVMObject.cpp     |  3 +-
 kernel/memory/InodeVMObject.h       |  8 +---
 kernel/memory/Memory.h              |  4 +-
 kernel/memory/MemoryManager.cpp     | 15 +++++++-
 kernel/memory/MemoryManager.h       | 60 ++++++++++++++++++++---------
 kernel/memory/PageDirectory.cpp     |  4 +-
 kernel/memory/PageTable.cpp         |  2 +-
 kernel/memory/VMObject.h            | 12 ++++++
 kernel/memory/VMSpace.cpp           | 52 +++++++++----------------
 kernel/syscall/mem.cpp              |  2 -
 kernel/syscall/thread.cpp           |  1 -
 kernel/tasking/Process.cpp          | 11 +----
 kernel/tasking/Process.h            |  2 -
 16 files changed, 158 insertions(+), 98 deletions(-)

diff --git a/kernel/interrupt/isr.cpp b/kernel/interrupt/isr.cpp
index c022b441..12869450 100644
--- a/kernel/interrupt/isr.cpp
+++ b/kernel/interrupt/isr.cpp
@@ -117,24 +117,37 @@ namespace Interrupt {
 			break;
 		case 14: //Page fault
-			if(!TaskManager::current_thread() || TaskManager::current_thread()->is_kernel_mode() || TaskManager::is_preempting()) {
-				MemoryManager::inst().page_fault_handler(regs);
-			} else {
-				size_t err_pos;
-				asm volatile ("mov %%cr2, %0" : "=r" (err_pos));
-				PageFault::Type type;
-				if(regs->err_code == FAULT_USER_READ)
+		{
+			size_t err_pos;
+			asm volatile ("mov %%cr2, %0" : "=r" (err_pos));
+			PageFault::Type type;
+			switch (regs->err_code) {
+				case FAULT_USER_READ:
+				case FAULT_USER_READ_GPF:
+				case FAULT_KERNEL_READ:
+				case FAULT_KERNEL_READ_GPF:
 					type = PageFault::Type::Read;
-				else if(regs->err_code == FAULT_USER_WRITE)
+					break;
+				case FAULT_USER_WRITE:
+				case FAULT_USER_WRITE_GPF:
+				case FAULT_KERNEL_WRITE:
+				case FAULT_KERNEL_WRITE_GPF:
 					type = PageFault::Type::Write;
-				else
+					break;
+				default:
 					type = PageFault::Type::Unknown;
-				TaskManager::current_thread()->handle_pagefault({
-					err_pos,
-					regs,
-				});
+			}
+			const PageFault fault { err_pos, regs, type };
+			if(TaskManager::is_preempting() || fault.type == PageFault::Type::Unknown) {
+				// Never try to handle a fault while preempting, or one of unknown type
+				MemoryManager::inst().page_fault_handler(regs);
+			} else if (err_pos >= HIGHER_HALF) {
+				MM.kernel_space()->try_pagefault(fault);
+			} else {
+				TaskManager::current_thread()->handle_pagefault(fault);
 			}
 			break;
+		}
 		default:
 			handle_fault("UNKNOWN_FAULT", "What did you do?", SIGILL, regs);
diff --git a/kernel/memory/AnonymousVMObject.cpp b/kernel/memory/AnonymousVMObject.cpp
index e86c9c39..5fe17ca7 100644
--- a/kernel/memory/AnonymousVMObject.cpp
+++ b/kernel/memory/AnonymousVMObject.cpp
@@ -3,7 +3,6 @@
 
 #include "AnonymousVMObject.h"
 #include "MemoryManager.h"
-#include "../kstd/cstring.h"
 
 Mutex AnonymousVMObject::s_shared_lock {"AnonymousVMObject::Shared"};
 int AnonymousVMObject::s_cur_shm_id = 1;
@@ -16,8 +15,14 @@ AnonymousVMObject::~AnonymousVMObject() {
 	}
 }
 
-ResultRet<kstd::Arc<AnonymousVMObject>> AnonymousVMObject::alloc(size_t size, kstd::string name) {
+ResultRet<kstd::Arc<AnonymousVMObject>> AnonymousVMObject::alloc(size_t size, kstd::string name, bool commit) {
 	size_t num_pages = kstd::ceil_div(size, PAGE_SIZE);
+
+	// If we requested uncommitted pages, don't allocate any physical pages yet.
+	if (!commit)
+		return kstd::Arc<AnonymousVMObject>(new AnonymousVMObject(name, num_pages, false));
+
+	// If we requested committed pages, alloc them now.
 	auto pages = TRY(MemoryManager::inst().alloc_physical_pages(num_pages));
 	auto object = kstd::Arc<AnonymousVMObject>(new AnonymousVMObject(name, pages, false));
 	auto tmp_mapped = MM.map_object(object);
@@ -52,6 +57,7 @@ ResultRet<kstd::Arc<AnonymousVMObject>> AnonymousVMObject::map_to_physical(Physi
 	}
 
 	auto object = new AnonymousVMObject("Physical Mapping", kstd::move(pages), false);
+	object->m_num_committed_pages = 0; // Hack - but we don't want this counting towards our memory total.
 	object->m_fork_action = ForkAction::Share;
 	return kstd::Arc<AnonymousVMObject>(object);
 }
@@ -97,5 +103,26 @@ ResultRet<kstd::Arc<VMObject>> AnonymousVMObject::clone() {
 	return kstd::static_pointer_cast<VMObject>(new_object);
 }
 
+ResultRet<bool> AnonymousVMObject::try_fault_in_page(PageIndex page) {
+	LOCK(m_page_lock);
+	if (page >= m_physical_pages.size())
+		return Result(EINVAL);
+	if (m_physical_pages[page])
+		return false;
+	m_physical_pages[page] = TRY(MM.alloc_physical_page());
+	m_num_committed_pages++;
+	MM.with_quickmapped(m_physical_pages[page], [](void* pagemem) {
+		memset(pagemem, 0, PAGE_SIZE);
+	});
+	return true;
+}
+
+size_t AnonymousVMObject::num_committed_pages() const {
+	return m_num_committed_pages;
+}
+
 AnonymousVMObject::AnonymousVMObject(kstd::string name, kstd::vector<PageIndex> physical_pages, bool cow):
-	VMObject(kstd::move(name), kstd::move(physical_pages), cow) {}
+	VMObject(kstd::move(name), kstd::move(physical_pages), cow), m_num_committed_pages(m_physical_pages.size()) {}
+
+AnonymousVMObject::AnonymousVMObject(kstd::string name, size_t n_pages, bool cow):
+	VMObject(kstd::move(name), kstd::vector<PageIndex>(n_pages, (PageIndex) 0), cow) {}
diff --git a/kernel/memory/AnonymousVMObject.h b/kernel/memory/AnonymousVMObject.h
index cd266c81..4f5717fb 100644
--- a/kernel/memory/AnonymousVMObject.h
+++ b/kernel/memory/AnonymousVMObject.h
@@ -18,9 +18,11 @@ class AnonymousVMObject: public VMObject {
 	/**
 	 * Allocates a new anonymous VMObject.
 	 * @param size The minimum size, in bytes, of the object.
+	 * @param name The name of the object.
+	 * @param commit Whether to immediately commit physical pages.
 	 * @return The newly allocated object, if successful.
 	 */
-	static ResultRet<kstd::Arc<AnonymousVMObject>> alloc(size_t size, kstd::string name = "anonymous");
+	static ResultRet<kstd::Arc<AnonymousVMObject>> alloc(size_t size, kstd::string name = "anonymous", bool commit = false);
 
 	/**
 	 * Allocates a new anonymous VMObject backed by contiguous physical pages.
@@ -72,12 +74,15 @@ class AnonymousVMObject: public VMObject {
 	bool is_anonymous() const override { return true; }
 	ForkAction fork_action() const override { return m_fork_action; }
 	ResultRet<kstd::Arc<VMObject>> clone() override;
+	ResultRet<bool> try_fault_in_page(PageIndex page) override;
+	size_t num_committed_pages() const override;
 
 private:
 	friend class MemoryManager;
 
 	explicit AnonymousVMObject(kstd::string name, kstd::vector<PageIndex> physical_pages, bool cow);
+	explicit AnonymousVMObject(kstd::string name, size_t n_pages, bool cow);
 
 	static Mutex s_shared_lock;
 	static int s_cur_shm_id;
@@ -88,4 +93,5 @@ class AnonymousVMObject: public VMObject {
 	ForkAction m_fork_action = ForkAction::BecomeCoW;
 	pid_t m_shared_owner;
 	int m_shm_id = 0;
+	size_t m_num_committed_pages = 0;
 };
diff --git a/kernel/memory/InodeVMObject.cpp b/kernel/memory/InodeVMObject.cpp
index 9ef44108..726be61e 100644
--- a/kernel/memory/InodeVMObject.cpp
+++ b/kernel/memory/InodeVMObject.cpp
@@ -25,7 +25,8 @@ InodeVMObject::InodeVMObject(kstd::string name, kstd::vector<PageIndex> physical
 	m_type(type)
 {}
 
-ResultRet<bool> InodeVMObject::read_page_if_needed(size_t index) {
+ResultRet<bool> InodeVMObject::try_fault_in_page(size_t index) {
+	LOCK(m_page_lock);
 	if(index >= m_physical_pages.size())
 		return Result(ERANGE);
 	if(m_physical_pages[index])
diff --git a/kernel/memory/InodeVMObject.h b/kernel/memory/InodeVMObject.h
index 6285d7fa..9e15a4fe 100644
--- a/kernel/memory/InodeVMObject.h
+++ b/kernel/memory/InodeVMObject.h
@@ -14,20 +14,14 @@ class InodeVMObject: public VMObject {
 
 	static kstd::Arc<InodeVMObject> make_for_inode(kstd::string name, kstd::Arc<Inode> inode, Type type);
 
-
-	PageIndex& physical_page_index(size_t index) const {
-		return m_physical_pages[index];
-	};
-
 	/**
 	 * Reads in the page at the given index if it isn't allocated yet.
 	 * @param index The index of the page to read in.
 	 * @return A successful result if the index is in range and could be read. True if read, false if already exists.
 	 */
-	ResultRet<bool> read_page_if_needed(size_t index);
+	ResultRet<bool> try_fault_in_page(PageIndex index) override;
 
 	kstd::Arc<Inode> inode() const { return m_inode; }
-	Mutex& lock() { return m_page_lock; }
 	Type type() const { return m_type; }
 	bool is_inode() const override { return true; }
 	ForkAction fork_action() const override {
diff --git a/kernel/memory/Memory.h b/kernel/memory/Memory.h
index ed3aac31..72caa4f1 100644
--- a/kernel/memory/Memory.h
+++ b/kernel/memory/Memory.h
@@ -22,8 +22,8 @@
 #define KERNEL_DATA_SIZE (KERNEL_DATA_END - KERNEL_DATA)
 #define KERNEL_END_VIRTADDR (HIGHER_HALF + KERNEL_SIZE_PAGES * PAGE_SIZE)
 #define KERNEL_VIRTUAL_HEAP_BEGIN 0xE0000000
-#define KERNEL_QUICKMAP_PAGE_A (KERNEL_VIRTUAL_HEAP_BEGIN - PAGE_SIZE)
-#define KERNEL_QUICKMAP_PAGE_B (KERNEL_VIRTUAL_HEAP_BEGIN - (PAGE_SIZE * 2))
+#define MAX_QUICKMAP_PAGES 8
+#define KERNEL_QUICKMAP_PAGES (KERNEL_VIRTUAL_HEAP_BEGIN - (PAGE_SIZE * MAX_QUICKMAP_PAGES))
 
 // For disambiguating parameter meanings.
 typedef size_t PageIndex;
diff --git a/kernel/memory/MemoryManager.cpp b/kernel/memory/MemoryManager.cpp
index 04a5161a..de6a60de 100644
--- a/kernel/memory/MemoryManager.cpp
+++ b/kernel/memory/MemoryManager.cpp
@@ -329,7 +329,7 @@ ResultRet<kstd::vector<PageIndex>> MemoryManager::alloc_contiguous_physical_page
 
 kstd::Arc<VMRegion> MemoryManager::alloc_kernel_region(size_t size) {
 	auto do_alloc = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
-		auto object = TRY(AnonymousVMObject::alloc(size, "kernel"));
+		auto object = TRY(AnonymousVMObject::alloc(size, "kernel", true));
 		return TRY(m_kernel_space->map_object(object, VMProt::RW));
 	};
 	auto res = do_alloc();
@@ -340,7 +340,7 @@ kstd::Arc<VMRegion> MemoryManager::alloc_kernel_region(size_t size) {
 
 kstd::Arc<VMRegion> MemoryManager::alloc_kernel_stack_region(size_t size) {
 	auto do_alloc = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
-		auto object = TRY(AnonymousVMObject::alloc(size, "kernel_stack"));
+		auto object = TRY(AnonymousVMObject::alloc(size, "kernel_stack", true));
 		return TRY(m_kernel_space->map_object_with_sentinel(object, VMProt::RW));
 	};
 	auto res = do_alloc();
@@ -360,6 +360,17 @@ kstd::Arc<VMRegion> MemoryManager::alloc_dma_region(size_t size) {
 	return res.value();
 }
 
+kstd::Arc<VMRegion> MemoryManager::alloc_contiguous_kernel_region(size_t size) {
+	auto do_alloc = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
+		auto object = TRY(AnonymousVMObject::alloc_contiguous(size, "kernel_contig"));
+		return TRY(m_kernel_space->map_object(object, VMProt::RW));
+	};
+	auto res = do_alloc();
+	if(res.is_error())
+		PANIC("ALLOC_CONTIG_REGION_FAIL", "Could not allocate a new contiguous anonymous memory region.");
+	return res.value();
+}
+
 kstd::Arc<VMRegion> MemoryManager::alloc_mapped_region(PhysicalAddress start, size_t size) {
 	auto do_map = [&]() -> ResultRet<kstd::Arc<VMRegion>> {
 		auto object = TRY(AnonymousVMObject::map_to_physical(start, size));
diff --git a/kernel/memory/MemoryManager.h b/kernel/memory/MemoryManager.h
index 17451ffb..07e768ee 100644
--- a/kernel/memory/MemoryManager.h
+++ b/kernel/memory/MemoryManager.h
@@ -132,6 +132,12 @@ class MemoryManager {
 	 */
 	kstd::Arc<VMRegion> alloc_dma_region(size_t size);
 
+	/**
+	 * Allocates a new contiguous anonymous region in kernel space.
+	 * @param size The minimum size, in bytes, of the new region.
+	 */
+	kstd::Arc<VMRegion> alloc_contiguous_kernel_region(size_t size);
+
 	/**
 	 * Allocates a new virtual region in kernel space that is mapped to an existing range of physical pages.
 	 * @param start The start physical address to map to. Will be rounded down to a page boundary.
@@ -154,13 +160,19 @@
 	 */
 	template<typename F>
 	void with_quickmapped(PageIndex page, F&& callback) {
-		LOCK(m_quickmap_lock);
-		ASSERT(!m_is_quickmapping);
-		m_is_quickmapping = true;
-		kernel_page_directory.map_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE, page, VMProt::RW);
-		callback((void*) KERNEL_QUICKMAP_PAGE_A);
-		kernel_page_directory.unmap_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE);
-		m_is_quickmapping = false;
+		size_t page_idx = -1;
+		for (int i = 0; i < MAX_QUICKMAP_PAGES; i++) {
+			bool expected = false;
+			if (m_quickmap_page[i].compare_exchange_strong(expected, true, MemoryOrder::Acquire)) {
+				page_idx = i;
+				break;
+			}
+		}
+		ASSERT(page_idx != -1);
+		kernel_page_directory.map_page((KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx, page, VMProt::RW);
+		callback((void*) (KERNEL_QUICKMAP_PAGES + page_idx * PAGE_SIZE));
+		kernel_page_directory.unmap_page((KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx);
+		m_quickmap_page[page_idx].store(false, MemoryOrder::Release);
 	}
 
 	/**
@@ -171,15 +183,28 @@
 	 */
 	template<typename F>
 	void with_dual_quickmapped(PageIndex page_a, PageIndex page_b, F&& callback) {
-		LOCK(m_quickmap_lock);
-		ASSERT(!m_is_quickmapping);
-		m_is_quickmapping = true;
-		kernel_page_directory.map_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE, page_a, VMProt::RW);
-		kernel_page_directory.map_page(KERNEL_QUICKMAP_PAGE_B / PAGE_SIZE, page_b, VMProt::RW);
-		callback((void*) KERNEL_QUICKMAP_PAGE_A, (void*) KERNEL_QUICKMAP_PAGE_B);
-		kernel_page_directory.unmap_page(KERNEL_QUICKMAP_PAGE_A / PAGE_SIZE);
-		kernel_page_directory.unmap_page(KERNEL_QUICKMAP_PAGE_B / PAGE_SIZE);
-		m_is_quickmapping = false;
+		size_t page_idx_a = -1, page_idx_b = -1;
+		for (int i = 0; i < MAX_QUICKMAP_PAGES; i++) {
+			bool expected = false;
+			if (m_quickmap_page[i].compare_exchange_strong(expected, true, MemoryOrder::Acquire)) {
+				if (page_idx_a == -1) {
+					page_idx_a = i;
+				} else {
+					page_idx_b = i;
+					break;
+				}
+			}
+		}
+		ASSERT((page_idx_a != -1) && (page_idx_b != -1));
+		auto page_a_idx = (KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx_a;
+		auto page_b_idx = (KERNEL_QUICKMAP_PAGES / PAGE_SIZE) + page_idx_b;
+		kernel_page_directory.map_page(page_a_idx, page_a, VMProt::RW);
+		kernel_page_directory.map_page(page_b_idx, page_b, VMProt::RW);
+		callback((void*) (page_a_idx * PAGE_SIZE), (void*) (page_b_idx * PAGE_SIZE));
+		kernel_page_directory.unmap_page(page_a_idx);
+		kernel_page_directory.unmap_page(page_b_idx);
+		m_quickmap_page[page_idx_a].store(false, MemoryOrder::Release);
+		m_quickmap_page[page_idx_b].store(false, MemoryOrder::Release);
 	}
 
 	/** Copies the contents of one physical page to another. **/
@@ -242,8 +267,7 @@ class MemoryManager {
 	kstd::Arc<VMSpace> m_kernel_space;
 	kstd::Arc<VMSpace> m_heap_space;
 
-	Mutex m_quickmap_lock {"quickmap"};
-	bool m_is_quickmapping = false;
+	Atomic<bool> m_quickmap_page[MAX_QUICKMAP_PAGES] {};
 };
 
 void liballoc_lock();
diff --git a/kernel/memory/PageDirectory.cpp b/kernel/memory/PageDirectory.cpp
index 1724741e..a52bb447 100644
--- a/kernel/memory/PageDirectory.cpp
+++ b/kernel/memory/PageDirectory.cpp
@@ -112,7 +112,7 @@ PageDirectory::PageDirectory(PageDirectory::DirectoryType type): m_type(type) {
 	if(type == DirectoryType::USER) {
-		m_entries_region = MemoryManager::inst().alloc_kernel_region(sizeof(Entry) * 1024);
+		m_entries_region = MemoryManager::inst().alloc_contiguous_kernel_region(sizeof(Entry) * 1024);
 		m_entries = (Entry*) m_entries_region->start();
 		// Map the kernel into the directory
 		for(auto i = 768; i < 1024; i++) {
@@ -313,7 +313,7 @@ Result PageDirectory::map_page(PageIndex vpage, PageIndex ppage, VMProt prot) {
 
 	entry->data.present = true;
 	entry->data.read_write = prot.write;
-	entry->data.user = true;
+	entry->data.user = directory_index < 768;
 	entry->data.set_address(ppage * PAGE_SIZE);
 
 	MemoryManager::inst().invlpg((void *) (vpage * PAGE_SIZE));
diff --git a/kernel/memory/PageTable.cpp b/kernel/memory/PageTable.cpp
index 8c735e38..124199fe 100644
--- a/kernel/memory/PageTable.cpp
+++ b/kernel/memory/PageTable.cpp
@@ -33,7 +33,7 @@ PageTable::PageTable(size_t vaddr, bool alloc_table):
 	_vaddr(vaddr)
 {
 	if(alloc_table) {
-		m_entries_region = MM.alloc_kernel_region(4096);
+		m_entries_region = MM.alloc_contiguous_kernel_region(4096);
 		_entries = (Entry*) m_entries_region->start();
 	}
 }
diff --git a/kernel/memory/VMObject.h b/kernel/memory/VMObject.h
index 4944ae94..51ffb79d 100644
--- a/kernel/memory/VMObject.h
+++ b/kernel/memory/VMObject.h
@@ -37,12 +37,24 @@ class VMObject: public kstd::ArcSelf<VMObject> {
 	/** What the object should do when a memory space containing it is forked. **/
 	virtual ForkAction fork_action() const { return ForkAction::Share; }
 
+	/** Lock for manipulating the object. **/
+	Mutex& lock() { return m_page_lock; }
+
+	PageIndex& physical_page_index(size_t index) const {
+		return m_physical_pages[index];
+	};
+
 	/** Tries to copy the page at a given index if it is marked CoW. If it is not, EINVAL is returned. **/
 	Result try_cow_page(PageIndex page);
 	/** Returns whether a page in the object is marked CoW. **/
 	bool page_is_cow(PageIndex page) const { return m_cow_pages.get(page); };
 	/** Clones this VMObject using all the same physical pages and properties. **/
 	virtual ResultRet<kstd::Arc<VMObject>> clone();
+	/** Try to fault in an unmapped page in the object.
+	 * @return Returns true if a new page was mapped in, false if already mapped. **/
+	virtual ResultRet<bool> try_fault_in_page(PageIndex page) { return Result(EINVAL); };
+	/** The number of committed pages this VMObject is responsible for (i.e. memory usage) **/
+	virtual size_t num_committed_pages() const { return 0; };
 
 protected:
 	/** Marks every page in this object as CoW, and increases the reference count of all pages by 1. **/
diff --git a/kernel/memory/VMSpace.cpp b/kernel/memory/VMSpace.cpp
index 0f5d93cc..5f45bef0 100644
--- a/kernel/memory/VMSpace.cpp
+++ b/kernel/memory/VMSpace.cpp
@@ -240,45 +240,31 @@ Result VMSpace::try_pagefault(PageFault fault) {
 		}
 
 		PageIndex error_page = (fault.address - vmRegion->start()) / PAGE_SIZE;
-
-		// Check if the region is a mapped inode.
-		if(vmRegion->object()->is_inode()) {
-			PageIndex inode_page = error_page + (vmRegion->object_start() / PAGE_SIZE);
-			auto inode_object = kstd::static_pointer_cast<InodeVMObject>(vmRegion->object());
-
-			// Check to see if it needs to be read in
-			LOCK_N(inode_object->lock(), inode_locker);
-			if(inode_object->physical_page_index(inode_page)) {
-				// This page may be marked CoW, so copy it if it is
-				if(vmRegion->prot().write && inode_object->page_is_cow(inode_page)) {
-					auto res = vmRegion->m_object->try_cow_page(inode_page);
-					if(res.is_error())
-						return res;
-				}
-
-				// Or, we may have encountered a race where the page was created by another thread after the fault.
-				m_page_directory.map(*vmRegion, VirtualRange { inode_page * PAGE_SIZE, PAGE_SIZE });
-				return Result(SUCCESS);
+		PageIndex object_page = error_page + (vmRegion->object_start() / PAGE_SIZE);
+		auto object = vmRegion->object();
+
+		// Check to see if it needs to be read in
+		LOCK_N(object->lock(), object_locker);
+		if(object->physical_page_index(object_page)) {
+			// This page may be marked CoW, so copy it if it is
+			if(vmRegion->prot().write && object->page_is_cow(object_page)) {
+				auto res = vmRegion->m_object->try_cow_page(object_page);
+				if(res.is_error())
+					return res;
 			}
-			// Otherwise, read in the page and map it
-			auto did_read = TRY(inode_object->read_page_if_needed(inode_page));
-			ASSERT(inode_object->physical_page_index(inode_page));
-			if(did_read)
-				m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE });
-
+			// Or, we may have encountered a race where the page was created by another thread after the fault.
+			m_page_directory.map(*vmRegion, VirtualRange { object_page * PAGE_SIZE, PAGE_SIZE });
 			return Result(SUCCESS);
 		}
-		// CoW if the region is writeable.
-		if(vmRegion->prot().write) {
-			auto result = vmRegion->m_object->try_cow_page(error_page);
-			if(result.is_success())
-				m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE });
-			return result;
-		}
+		// Otherwise, read in the page and map it
+		auto did_read = TRY(object->try_fault_in_page(object_page));
+		ASSERT(object->physical_page_index(object_page));
+		if(did_read)
+			m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE });
 
-		return Result(EINVAL);
+		return Result(SUCCESS);
 	}
 	cur_region = cur_region->next;
 }
diff --git a/kernel/syscall/mem.cpp b/kernel/syscall/mem.cpp
index b37dcc7e..69eaafcf 100644
--- a/kernel/syscall/mem.cpp
+++ b/kernel/syscall/mem.cpp
@@ -169,7 +169,6 @@ Result Process::sys_mmap(UserspacePointer<struct mmap_args> args_ptr) {
 	if(!region)
 		return Result(EINVAL);
 
-	m_used_pmem += region->size();
 	_vm_regions.push_back(region);
 	UserspacePointer<void*>(args.addr_p).set((void*) region->start());
 	return Result(SUCCESS);
@@ -182,7 +181,6 @@ int Process::sys_munmap(void* addr, size_t length) {
 	for(size_t i = 0; i < _vm_regions.size(); i++) {
 		/* TODO: Size mismatch? */
 		if(_vm_regions[i]->start() == (VirtualAddress) addr /* && _vm_regions[i]->size() == length*/) {
-			m_used_pmem -= _vm_regions[i]->size();
 			_vm_regions.erase(i);
 			return SUCCESS;
 		}
diff --git a/kernel/syscall/thread.cpp b/kernel/syscall/thread.cpp
index c9a45198..2f44b930 100644
--- a/kernel/syscall/thread.cpp
+++ b/kernel/syscall/thread.cpp
@@ -6,7 +6,6 @@
 
 int Process::sys_threadcreate(void* (*entry_func)(void* (*)(void*), void*), void* (*thread_func)(void*), void* arg) {
 	auto thread = kstd::make_shared<Thread>(_self_ptr, TaskManager::get_new_pid(), entry_func, thread_func, arg);
-	recalculate_pmem_total();
 	insert_thread(thread);
 	TaskManager::queue_thread(thread);
 	return thread->tid();
diff --git a/kernel/tasking/Process.cpp b/kernel/tasking/Process.cpp
index 76d4a46e..9dd05523 100644
--- a/kernel/tasking/Process.cpp
+++ b/kernel/tasking/Process.cpp
@@ -77,8 +77,6 @@ ResultRet<Process*> Process::create_user(const kstd::string& executable_loc, Use
 	for(const auto& region : regions)
 		proc->_vm_regions.push_back(region);
 
-	proc->recalculate_pmem_total();
-
 	return proc->_self_ptr;
 }
 
@@ -214,7 +212,6 @@ Process::Process(Process *to_fork, ThreadRegisters& regs): _user(to_fork->_user)
 	_pgid = to_fork->_pgid;
 	_umask = to_fork->_umask;
 	_tty = to_fork->_tty;
-	m_used_pmem = to_fork->m_used_pmem;
 	m_used_shmem = to_fork->m_used_shmem;
 
 	_state = ALIVE;
@@ -302,7 +299,7 @@ ResultRet<kstd::Arc<VMRegion>> Process::map_object(kstd::Arc<VMObject> object, V
 }
 
 size_t Process::used_pmem() const {
-	return m_used_pmem;
+	return _vm_space->calculate_regular_anonymous_total();
 }
 
 size_t Process::used_vmem() const {
@@ -350,12 +347,6 @@ void Process::alert_thread_died(kstd::Arc<Thread> thread) {
 	}
 }
 
-void Process::recalculate_pmem_total() {
-	if(_is_destroying || !_vm_space)
-		return;
-	m_used_pmem = _vm_space->calculate_regular_anonymous_total();
-}
-
 void Process::insert_thread(const kstd::Arc<Thread>& thread) {
 	LOCK(_thread_lock);
 	_threads[thread->_tid] = thread;
diff --git a/kernel/tasking/Process.h b/kernel/tasking/Process.h
index 2b945a4e..f8fda20e 100644
--- a/kernel/tasking/Process.h
+++ b/kernel/tasking/Process.h
@@ -175,7 +175,6 @@ class Process {
 	Process(Process* to_fork, ThreadRegisters& regs);
 
 	void alert_thread_died(kstd::Arc<Thread> thread);
-	void recalculate_pmem_total();
 	void insert_thread(const kstd::Arc<Thread>& thread);
 	void remove_thread(const kstd::Arc<Thread>& thread);
 
@@ -217,7 +216,6 @@ class Process {
 	kstd::Arc<PageDirectory> _page_directory;
 	kstd::vector<kstd::Arc<VMRegion>> _vm_regions;
 	Mutex m_mem_lock {"Process::Memory"};
-	size_t m_used_pmem = 0;
 	size_t m_used_shmem = 0;
 
 	//Files & Pipes
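
Appendix (illustration only, not part of the patch): the lazy-commit
scheme above can be modeled in standalone C++. "AnonymousObject" below
is a simplified, hypothetical stand-in for AnonymousVMObject: pages
start uncommitted, and a zero-filled frame is only allocated on first
fault. This is also what lets used_pmem be reported as the count of
committed pages rather than a running total.

	#include <cstddef>
	#include <cstring>
	#include <iostream>
	#include <vector>

	static constexpr size_t PAGE_SIZE = 4096;

	struct Frame { unsigned char bytes[PAGE_SIZE]; };

	class AnonymousObject {
	public:
		// All pages start out uncommitted (no frames allocated yet).
		explicit AnonymousObject(size_t num_pages) : m_frames(num_pages, nullptr) {}
		~AnonymousObject() { for (Frame* f : m_frames) delete f; }

		// Returns true if a new frame was committed, false if the page was
		// already present (e.g. two threads raced on the same fault) or the
		// index is out of range (where the kernel returns an error instead).
		bool try_fault_in_page(size_t page) {
			if (page >= m_frames.size() || m_frames[page])
				return false;
			m_frames[page] = new Frame;
			std::memset(m_frames[page]->bytes, 0, PAGE_SIZE); // fresh pages read as zero
			m_committed++;
			return true;
		}

		size_t committed_pages() const { return m_committed; }

	private:
		std::vector<Frame*> m_frames; // nullptr means "not committed yet"
		size_t m_committed = 0;
	};

	int main() {
		// A 16 MiB anonymous "mapping" costs no frames up front...
		AnonymousObject object((16 * 1024 * 1024) / PAGE_SIZE);
		std::cout << "committed after alloc: " << object.committed_pages() << "\n";

		// ...and only the pages that are actually touched get committed.
		object.try_fault_in_page(0);
		object.try_fault_in_page(1);
		object.try_fault_in_page(1); // no-op: already committed
		std::cout << "committed after touching two pages: " << object.committed_pages() << "\n";
	}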
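
The isr.cpp hunk classifies a fault before routing it: the handler reads
the faulting address from CR2, derives read/write from the error code,
and then picks a handler (the panic path while preempting or for unknown
codes, the kernel VMSpace for higher-half addresses, the faulting thread
otherwise). On x86 the page-fault error code is a bitfield (bit 0:
protection violation, bit 1: write, bit 2: user mode), and the FAULT_*
constants name specific combinations of those bits. A minimal decoder,
as a sketch using the raw bits rather than the kernel's constants:

	#include <cstdint>

	enum class FaultType { Read, Write };

	struct DecodedFault {
		FaultType type;
		bool from_user;      // bit 2: fault happened in user mode
		bool was_protection; // bit 0: page was present; protection violation
	};

	DecodedFault decode_page_fault(uint32_t err_code) {
		return DecodedFault {
			(err_code & 0x2) ? FaultType::Write : FaultType::Read, // bit 1
			(err_code & 0x4) != 0,                                 // bit 2
			(err_code & 0x1) != 0,                                 // bit 0
		};
	}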
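
The quickmap change follows the same lock-free pattern in both helpers:
the single quickmap mutex is replaced by a pool of MAX_QUICKMAP_PAGES
slots, each claimed with an atomic compare-exchange, so independent
threads can quickmap concurrently. A standalone sketch of the slot
claiming (std::atomic standing in for the kernel's Atomic type, with the
actual page-directory map/unmap elided):

	#include <atomic>
	#include <cassert>
	#include <cstddef>
	#include <cstdint>

	static constexpr size_t MAX_QUICKMAP_PAGES = 8;
	static std::atomic<bool> g_slot_busy[MAX_QUICKMAP_PAGES];

	template<typename F>
	void with_quickmap_slot(F&& callback) {
		size_t slot = SIZE_MAX;
		for (size_t i = 0; i < MAX_QUICKMAP_PAGES; i++) {
			bool expected = false;
			// Acquire pairs with the release below, so the new owner of a
			// slot sees the previous owner's unmapping of it.
			if (g_slot_busy[i].compare_exchange_strong(expected, true, std::memory_order_acquire)) {
				slot = i;
				break;
			}
		}
		assert(slot != SIZE_MAX && "all quickmap slots in use");
		callback(slot); // the kernel maps/unmaps a page at the slot's fixed address here
		g_slot_busy[slot].store(false, std::memory_order_release);
	}

	int main() {
		with_quickmap_slot([](size_t slot) {
			(void) slot; // touch the quickmapped page via the slot's virtual address
		});
	}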