From 2c4bd44d7e2dfe2cc57fea2cb16bf173abdf43f8 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 17 Sep 2021 22:01:58 -0700 Subject: [PATCH] kern: support dynamic resource expansion for system heaps/events/sessions. --- .../program/source/smc/secmon_smc_info.cpp | 17 +- .../arch/arm64/kern_k_slab_heap_impl.hpp | 31 +++- .../kern_k_dynamic_resource_manager.hpp | 60 ++++++ .../mesosphere/kern_k_dynamic_slab_heap.hpp | 91 ++++----- .../include/mesosphere/kern_k_event.hpp | 2 +- .../mesosphere/kern_k_light_session.hpp | 2 +- .../mesosphere/kern_k_memory_layout.hpp | 1 + .../mesosphere/kern_k_page_table_manager.hpp | 83 +++------ .../kern_k_page_table_slab_heap.hpp | 93 ++++++++++ .../include/mesosphere/kern_k_process.hpp | 5 +- .../mesosphere/kern_k_resource_limit.hpp | 2 + .../include/mesosphere/kern_k_session.hpp | 2 +- .../mesosphere/kern_k_session_request.hpp | 10 +- .../mesosphere/kern_k_shared_memory.hpp | 2 +- .../include/mesosphere/kern_k_slab_heap.hpp | 173 ++++++++++-------- .../mesosphere/kern_k_target_system.hpp | 15 +- .../mesosphere/kern_k_unused_slab_memory.hpp | 27 +++ .../include/mesosphere/kern_kernel.hpp | 27 ++- .../include/mesosphere/kern_slab_helpers.hpp | 42 ++++- .../source/arch/arm64/kern_k_page_table.cpp | 2 +- .../nintendo/nx/kern_k_device_page_table.cpp | 14 +- .../nintendo/nx/kern_k_system_control.cpp | 1 + .../board/nintendo/nx/kern_secure_monitor.hpp | 17 +- .../source/init/kern_init_slab_setup.cpp | 31 +++- .../source/kern_initial_process.cpp | 6 +- .../source/kern_k_client_port.cpp | 86 ++++++--- .../source/kern_k_dump_object.cpp | 4 +- .../libmesosphere/source/kern_k_event.cpp | 2 +- .../source/kern_k_memory_manager.cpp | 1 + .../source/kern_k_page_table_base.cpp | 2 +- .../libmesosphere/source/kern_k_process.cpp | 23 ++- .../source/kern_k_resource_limit.cpp | 43 ++++- .../source/kern_k_unused_slab_memory.cpp | 159 ++++++++++++++++ .../libmesosphere/source/kern_kernel.cpp | 33 ++-- .../source/svc/kern_svc_event.cpp | 22 ++- 
.../source/svc/kern_svc_session.cpp | 38 +++- .../source/kern_kernel_instantiations.cpp | 15 +- 37 files changed, 856 insertions(+), 328 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_dynamic_resource_manager.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_page_table_slab_heap.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_unused_slab_memory.hpp create mode 100644 libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp diff --git a/exosphere/program/source/smc/secmon_smc_info.cpp b/exosphere/program/source/smc/secmon_smc_info.cpp index 3af6b83f8..8690ef056 100644 --- a/exosphere/program/source/smc/secmon_smc_info.cpp +++ b/exosphere/program/source/smc/secmon_smc_info.cpp @@ -33,14 +33,15 @@ namespace ams::secmon::smc { using PhysicalMemorySize = util::BitPack32::Field<16, 2>; /* Kernel view, from libmesosphere. */ - using DebugFillMemory = util::BitPack32::Field<0, 1, bool>; - using EnableUserExceptionHandlers = util::BitPack32::Field; - using EnableUserPmuAccess = util::BitPack32::Field; - using IncreaseThreadResourceLimit = util::BitPack32::Field; - using Reserved4 = util::BitPack32::Field; - using UseSecureMonitorPanicCall = util::BitPack32::Field; - using Reserved9 = util::BitPack32::Field; - using MemorySize = util::BitPack32::Field; /* smc::MemorySize = pkg1::MemorySize */ + using DebugFillMemory = util::BitPack32::Field<0, 1, bool>; + using EnableUserExceptionHandlers = util::BitPack32::Field; + using EnableUserPmuAccess = util::BitPack32::Field; + using IncreaseThreadResourceLimit = util::BitPack32::Field; + using DisableDynamicResourceLimits = util::BitPack32::Field; + using Reserved5 = util::BitPack32::Field; + using UseSecureMonitorPanicCall = util::BitPack32::Field; + using Reserved9 = util::BitPack32::Field; + using MemorySize = util::BitPack32::Field; /* smc::MemorySize = pkg1::MemorySize */ }; constexpr const pkg1::MemorySize 
DramIdToMemorySize[fuse::DramId_Count] = { diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp index c52f28528..05e23c577 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp @@ -16,6 +16,7 @@ #pragma once #include #include +#include namespace ams::kern::arch::arm64 { @@ -24,6 +25,32 @@ namespace ams::kern::arch::arm64 { { t.next } -> std::convertible_to; }; + ALWAYS_INLINE bool IsSlabAtomicValid() { + /* Without careful consideration, slab heaps atomics are vulnerable to */ + /* the ABA problem, when doing compare and swap of node pointers. */ + /* We resolve this by using the ARM exclusive monitor; we bundle the */ + /* load and store of the relevant values into a single exclusive monitor */ + /* hold, preventing the ABA problem. */ + /* However, our assembly must do both a load and a store under a single */ + /* hold, at different memory addresses. Considering the case where the */ + /* addresses are distinct but resolve to the same cache set (by chance), */ + /* we can note that under a 1-way associative (direct-mapped) cache */ + /* we would have as a guarantee that the second access would evict the */ + /* cache line from the first access, invalidating our exclusive monitor */ + /* hold. Thus, we require that the cache is not 1-way associative, for */ + /* our implementation to be correct. */ + { + /* Disable interrupts. */ + KScopedInterruptDisable di; + + /* Select L1 cache. */ + cpu::SetCsselrEl1(0); + + /* Check that the L1 cache is not direct-mapped. 
*/ + return cpu::CacheSizeIdRegisterAccessor().GetAssociativity() != 0; + } + } + template requires SlabHeapNode ALWAYS_INLINE T *AllocateFromSlabAtomic(T **head) { u32 tmp; @@ -36,10 +63,7 @@ namespace ams::kern::arch::arm64 { " ldr %[next], [%[node]]\n" " stlxr %w[tmp], %[next], [%[head]]\n" " cbnz %w[tmp], 1b\n" - " b 3f\n" "2:\n" - " clrex\n" - "3:\n" : [tmp]"=&r"(tmp), [node]"=&r"(node), [next]"=&r"(next), [head]"+&r"(head) : : "cc", "memory" @@ -59,7 +83,6 @@ namespace ams::kern::arch::arm64 { " str %[next], [%[node]]\n" " stlxr %w[tmp], %[node], [%[head]]\n" " cbnz %w[tmp], 1b\n" - "2:\n" : [tmp]"=&r"(tmp), [node]"+&r"(node), [next]"=&r"(next), [head]"+&r"(head) : : "cc", "memory" diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_resource_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_resource_manager.hpp new file mode 100644 index 000000000..5bf9eedbe --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_resource_manager.hpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + template + class KDynamicResourceManager { + NON_COPYABLE(KDynamicResourceManager); + NON_MOVEABLE(KDynamicResourceManager); + public: + using DynamicSlabType = KDynamicSlabHeap; + private: + KDynamicPageManager *m_page_allocator{}; + DynamicSlabType *m_slab_heap{}; + public: + constexpr KDynamicResourceManager() = default; + + constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_slab_heap->GetAddress(); } + constexpr ALWAYS_INLINE size_t GetSize() const { return m_slab_heap->GetSize(); } + constexpr ALWAYS_INLINE size_t GetUsed() const { return m_slab_heap->GetUsed(); } + constexpr ALWAYS_INLINE size_t GetPeak() const { return m_slab_heap->GetPeak(); } + constexpr ALWAYS_INLINE size_t GetCount() const { return m_slab_heap->GetCount(); } + + ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, DynamicSlabType *slab_heap) { + m_page_allocator = page_allocator; + m_slab_heap = slab_heap; + } + + T *Allocate() const { + return m_slab_heap->Allocate(m_page_allocator); + } + + void Free(T *t) const { + m_slab_heap->Free(t); + } + }; + + class KBlockInfoManager : public KDynamicResourceManager{}; + class KMemoryBlockSlabManager : public KDynamicResourceManager{}; + + using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType; + using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp index 7665c4de7..fc6b7d098 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp @@ -23,95 +23,71 @@ namespace ams::kern { template - class KDynamicSlabHeap { + class KDynamicSlabHeap : protected impl::KSlabHeapImpl { NON_COPYABLE(KDynamicSlabHeap); NON_MOVEABLE(KDynamicSlabHeap); private: - 
using Impl = impl::KSlabHeapImpl; using PageBuffer = KDynamicPageManager::PageBuffer; private: - Impl m_impl; - KDynamicPageManager *m_page_allocator; - std::atomic m_used; - std::atomic m_peak; - std::atomic m_count; - KVirtualAddress m_address; - size_t m_size; - private: - ALWAYS_INLINE Impl *GetImpl() { - return std::addressof(m_impl); - } - ALWAYS_INLINE const Impl *GetImpl() const { - return std::addressof(m_impl); - } + std::atomic m_used{}; + std::atomic m_peak{}; + std::atomic m_count{}; + KVirtualAddress m_address{}; + size_t m_size{}; public: - constexpr KDynamicSlabHeap() : m_impl(), m_page_allocator(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ } + constexpr KDynamicSlabHeap() = default; - constexpr KVirtualAddress GetAddress() const { return m_address; } - constexpr size_t GetSize() const { return m_size; } - constexpr size_t GetUsed() const { return m_used.load(); } - constexpr size_t GetPeak() const { return m_peak.load(); } - constexpr size_t GetCount() const { return m_count.load(); } + constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_address; } + constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; } + constexpr ALWAYS_INLINE size_t GetUsed() const { return m_used.load(); } + constexpr ALWAYS_INLINE size_t GetPeak() const { return m_peak.load(); } + constexpr ALWAYS_INLINE size_t GetCount() const { return m_count.load(); } - constexpr bool IsInRange(KVirtualAddress addr) const { + constexpr ALWAYS_INLINE bool IsInRange(KVirtualAddress addr) const { return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; } - void Initialize(KVirtualAddress memory, size_t sz) { - /* Set tracking fields. */ - m_address = memory; - m_count = sz / sizeof(T); - m_size = m_count * sizeof(T); - - /* Free blocks to memory. 
*/ - u8 *cur = GetPointer(m_address + m_size); - for (size_t i = 0; i < sz / sizeof(T); i++) { - cur -= sizeof(T); - this->GetImpl()->Free(cur); - } - } - - void Initialize(KDynamicPageManager *page_allocator) { - m_page_allocator = page_allocator; - m_address = m_page_allocator->GetAddress(); - m_size = m_page_allocator->GetSize(); - } - - void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) { + ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) { MESOSPHERE_ASSERT(page_allocator != nullptr); /* Initialize members. */ - this->Initialize(page_allocator); + m_address = page_allocator->GetAddress(); + m_size = page_allocator->GetSize(); + + /* Initialize the base allocator. */ + KSlabHeapImpl::Initialize(); /* Allocate until we have the correct number of objects. */ while (m_count.load() < num_objects) { - auto *allocated = reinterpret_cast(m_page_allocator->Allocate()); + auto *allocated = reinterpret_cast(page_allocator->Allocate()); MESOSPHERE_ABORT_UNLESS(allocated != nullptr); + for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) { - this->GetImpl()->Free(allocated + i); + KSlabHeapImpl::Free(allocated + i); } + m_count.fetch_add(sizeof(PageBuffer) / sizeof(T)); } } - T *Allocate() { - T *allocated = reinterpret_cast(this->GetImpl()->Allocate()); + ALWAYS_INLINE T *Allocate(KDynamicPageManager *page_allocator) { + T *allocated = static_cast(KSlabHeapImpl::Allocate()); /* If we successfully allocated and we should clear the node, do so. */ if constexpr (ClearNode) { if (AMS_LIKELY(allocated != nullptr)) { - reinterpret_cast(allocated)->next = nullptr; + reinterpret_cast(allocated)->next = nullptr; } } /* If we fail to allocate, try to get a new page from our next allocator. 
*/ - if (AMS_UNLIKELY(allocated == nullptr)) { - if (m_page_allocator != nullptr) { - allocated = reinterpret_cast(m_page_allocator->Allocate()); + if (AMS_UNLIKELY(allocated == nullptr) ) { + if (page_allocator != nullptr) { + allocated = reinterpret_cast(page_allocator->Allocate()); if (allocated != nullptr) { /* If we succeeded in getting a page, free the rest to our slab. */ for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { - this->GetImpl()->Free(allocated + i); + KSlabHeapImpl::Free(allocated + i); } m_count.fetch_add(sizeof(PageBuffer) / sizeof(T)); } @@ -135,13 +111,10 @@ namespace ams::kern { return allocated; } - void Free(T *t) { - this->GetImpl()->Free(t); + ALWAYS_INLINE void Free(T *t) { + KSlabHeapImpl::Free(t); m_used.fetch_sub(1); } }; - class KBlockInfoManager : public KDynamicSlabHeap{}; - class KMemoryBlockSlabManager : public KDynamicSlabHeap{}; - } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp index 8f0d98e4c..d228b82ae 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp @@ -21,7 +21,7 @@ namespace ams::kern { - class KEvent final : public KAutoObjectWithSlabHeapAndContainer { + class KEvent final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject); private: KReadableEvent m_readable_event; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp index fba1b46e2..dde7263b0 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp @@ -25,7 +25,7 @@ namespace ams::kern { class KClientPort; class KProcess; - class KLightSession final : public KAutoObjectWithSlabHeapAndContainer { + class KLightSession final : public 
KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KLightSession, KAutoObject); private: enum class State : u8 { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index e38a361dc..569b8062c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -144,6 +144,7 @@ namespace ams::kern { static NOINLINE const KMemoryRegion &GetPageTableHeapRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap)); } static NOINLINE const KMemoryRegion &GetKernelStackRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack)); } static NOINLINE const KMemoryRegion &GetTempRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp)); } + static NOINLINE const KMemoryRegion &GetSlabRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)); } static NOINLINE const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp index fc5652ed1..c1fc5fcb8 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp @@ -15,58 +15,27 @@ */ #pragma once #include -#include -#include +#include +#include namespace ams::kern { - namespace impl { - - class PageTablePage { - private: - u8 m_buffer[PageSize]; - public: - ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. 
*/ } - }; - static_assert(sizeof(PageTablePage) == PageSize); - - } - - class KPageTableManager : public KDynamicSlabHeap { + class KPageTableManager : public KDynamicResourceManager { public: - using RefCount = u16; - static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); - static_assert(PageTableSize == PageSize); + using RefCount = KPageTableSlabHeap::RefCount; + static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize; private: - using BaseHeap = KDynamicSlabHeap; + using BaseHeap = KDynamicResourceManager; private: - RefCount *m_ref_counts; + KPageTableSlabHeap *m_pt_heap{}; public: - static constexpr size_t CalculateReferenceCountSize(size_t size) { - return (size / PageSize) * sizeof(RefCount); - } - public: - constexpr KPageTableManager() : BaseHeap(), m_ref_counts() { /* ... */ } - private: - void Initialize(RefCount *rc) { - m_ref_counts = rc; - for (size_t i = 0; i < this->GetSize() / PageSize; i++) { - m_ref_counts[i] = 0; - } - } + constexpr KPageTableManager() = default; - constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const { - return std::addressof(m_ref_counts[(addr - this->GetAddress()) / PageSize]); - } - public: - void Initialize(KDynamicPageManager *page_allocator, RefCount *rc) { - BaseHeap::Initialize(page_allocator); - this->Initialize(rc); - } + ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, KPageTableSlabHeap *pt_heap) { + m_pt_heap = pt_heap; - void Initialize(KDynamicPageManager *page_allocator, size_t object_count, RefCount *rc) { - BaseHeap::Initialize(page_allocator, object_count); - this->Initialize(rc); + static_assert(std::derived_from); + BaseHeap::Initialize(page_allocator, pt_heap); } KVirtualAddress Allocate() { @@ -74,33 +43,23 @@ namespace ams::kern { } void Free(KVirtualAddress addr) { - /* Free the page. 
*/ - BaseHeap::Free(GetPointer(addr)); + return BaseHeap::Free(GetPointer(addr)); } - RefCount GetRefCount(KVirtualAddress addr) const { - MESOSPHERE_ASSERT(this->IsInRange(addr)); - return *this->GetRefCountPointer(addr); + ALWAYS_INLINE RefCount GetRefCount(KVirtualAddress addr) const { + return m_pt_heap->GetRefCount(addr); } - void Open(KVirtualAddress addr, int count) { - MESOSPHERE_ASSERT(this->IsInRange(addr)); - - *this->GetRefCountPointer(addr) += count; - - MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0); + ALWAYS_INLINE void Open(KVirtualAddress addr, int count) { + return m_pt_heap->Open(addr, count); } - bool Close(KVirtualAddress addr, int count) { - MESOSPHERE_ASSERT(this->IsInRange(addr)); - MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count); - - *this->GetRefCountPointer(addr) -= count; - return this->GetRefCount(addr) == 0; + ALWAYS_INLINE bool Close(KVirtualAddress addr, int count) { + return m_pt_heap->Close(addr, count); } - constexpr bool IsInPageTableHeap(KVirtualAddress addr) const { - return this->IsInRange(addr); + constexpr ALWAYS_INLINE bool IsInPageTableHeap(KVirtualAddress addr) const { + return m_pt_heap->IsInRange(addr); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_slab_heap.hpp new file mode 100644 index 000000000..0667bbd03 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_slab_heap.hpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + namespace impl { + + class PageTablePage { + private: + u8 m_buffer[PageSize]; + public: + ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. */ } + }; + static_assert(sizeof(PageTablePage) == PageSize); + + } + + class KPageTableSlabHeap : public KDynamicSlabHeap { + public: + using RefCount = u16; + static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); + static_assert(PageTableSize == PageSize); + private: + using BaseHeap = KDynamicSlabHeap; + private: + RefCount *m_ref_counts{}; + public: + static constexpr ALWAYS_INLINE size_t CalculateReferenceCountSize(size_t size) { + return (size / PageSize) * sizeof(RefCount); + } + public: + constexpr KPageTableSlabHeap() = default; + private: + ALWAYS_INLINE void Initialize(RefCount *rc) { + m_ref_counts = rc; + for (size_t i = 0; i < this->GetSize() / PageSize; i++) { + m_ref_counts[i] = 0; + } + } + + constexpr ALWAYS_INLINE RefCount *GetRefCountPointer(KVirtualAddress addr) const { + return m_ref_counts + ((addr - this->GetAddress()) / PageSize); + } + public: + ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, size_t object_count, RefCount *rc) { + BaseHeap::Initialize(page_allocator, object_count); + this->Initialize(rc); + } + + ALWAYS_INLINE RefCount GetRefCount(KVirtualAddress addr) const { + MESOSPHERE_ASSERT(this->IsInRange(addr)); + return *this->GetRefCountPointer(addr); + } + + ALWAYS_INLINE void Open(KVirtualAddress addr, int count) { + MESOSPHERE_ASSERT(this->IsInRange(addr)); + + *this->GetRefCountPointer(addr) += count; + + MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0); + } + + ALWAYS_INLINE bool Close(KVirtualAddress addr, int count) { + MESOSPHERE_ASSERT(this->IsInRange(addr)); + 
MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count); + + *this->GetRefCountPointer(addr) -= count; + return this->GetRefCount(addr) == 0; + } + + constexpr ALWAYS_INLINE bool IsInPageTableHeap(KVirtualAddress addr) const { + return this->IsInRange(addr); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index 148d2b7bc..41606d3fc 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include namespace ams::kern { @@ -121,6 +121,9 @@ namespace ams::kern { KMemoryBlockSlabManager m_memory_block_slab_manager{}; KBlockInfoManager m_block_info_manager{}; KPageTableManager m_page_table_manager{}; + KMemoryBlockSlabHeap m_memory_block_heap{}; + KBlockInfoSlabHeap m_block_info_heap{}; + KPageTableSlabHeap m_page_table_heap{}; private: Result Initialize(const ams::svc::CreateProcessParameter ¶ms); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp index 822dbb853..5a127f86a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_resource_limit.hpp @@ -47,6 +47,8 @@ namespace ams::kern { Result SetLimitValue(ams::svc::LimitableResource which, s64 value); + void Add(ams::svc::LimitableResource which, s64 value); + bool Reserve(ams::svc::LimitableResource which, s64 value); bool Reserve(ams::svc::LimitableResource which, s64 value, s64 timeout); void Release(ams::svc::LimitableResource which, s64 value); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp index fc88341c1..8c490a761 100644 --- 
a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp @@ -25,7 +25,7 @@ namespace ams::kern { class KClientPort; class KProcess; - class KSession final : public KAutoObjectWithSlabHeapAndContainer { + class KSession final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KSession, KAutoObject); private: enum class State : u8 { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp index 3707e1372..0deb5ed26 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp @@ -24,7 +24,7 @@ namespace ams::kern { - class KSessionRequest final : public KSlabAllocated, public KAutoObject, public util::IntrusiveListBaseNode { + class KSessionRequest final : public KSlabAllocated, public KAutoObject, public util::IntrusiveListBaseNode { MESOSPHERE_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject); public: class SessionMappings { @@ -140,6 +140,14 @@ namespace ams::kern { return req; } + static KSessionRequest *CreateFromUnusedSlabMemory() { + KSessionRequest *req = KSessionRequest::AllocateFromUnusedSlabMemory(); + if (req != nullptr) { + KAutoObject::Create(req); + } + return req; + } + virtual void Destroy() override { this->Finalize(); KSessionRequest::Free(this); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp index 66acfd42e..5685cc177 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp @@ -35,7 +35,7 @@ namespace ams::kern { bool m_is_initialized; public: explicit KSharedMemory() - : m_page_group(std::addressof(Kernel::GetBlockInfoManager())), m_resource_limit(nullptr), 
m_owner_process_id(std::numeric_limits::max()), + : m_page_group(std::addressof(Kernel::GetSystemBlockInfoManager())), m_resource_limit(nullptr), m_owner_process_id(std::numeric_limits::max()), m_owner_perm(ams::svc::MemoryPermission_None), m_remote_perm(ams::svc::MemoryPermission_None), m_is_initialized(false) { /* ... */ diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp index 6afa5c94d..176dcacdc 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp @@ -16,11 +16,13 @@ #pragma once #include #include +#include #if defined(ATMOSPHERE_ARCH_ARM64) #include namespace ams::kern { + using ams::kern::arch::arm64::IsSlabAtomicValid; using ams::kern::arch::arm64::AllocateFromSlabAtomic; using ams::kern::arch::arm64::FreeToSlabAtomic; } @@ -44,78 +46,73 @@ namespace ams::kern { Node *next; }; private: - Node * m_head; - size_t m_obj_size; + Node *m_head{nullptr}; public: - constexpr KSlabHeapImpl() : m_head(nullptr), m_obj_size(0) { MESOSPHERE_ASSERT_THIS(); } + constexpr KSlabHeapImpl() = default; - void Initialize(size_t size) { - MESOSPHERE_INIT_ABORT_UNLESS(m_head == nullptr); - m_obj_size = size; + void Initialize() { + MESOSPHERE_ABORT_UNLESS(m_head == nullptr); + MESOSPHERE_ABORT_UNLESS(IsSlabAtomicValid()); } - Node *GetHead() const { + ALWAYS_INLINE Node *GetHead() const { return m_head; } - size_t GetObjectSize() const { - return m_obj_size; - } - - void *Allocate() { - MESOSPHERE_ASSERT_THIS(); - + ALWAYS_INLINE void *Allocate() { return AllocateFromSlabAtomic(std::addressof(m_head)); } - void Free(void *obj) { - MESOSPHERE_ASSERT_THIS(); - - Node *node = reinterpret_cast(obj); - - return FreeToSlabAtomic(std::addressof(m_head), node); + ALWAYS_INLINE void Free(void *obj) { + return FreeToSlabAtomic(std::addressof(m_head), static_cast(obj)); } }; } - class KSlabHeapBase { + 
template + class KSlabHeapBase : protected impl::KSlabHeapImpl { NON_COPYABLE(KSlabHeapBase); NON_MOVEABLE(KSlabHeapBase); private: - using Impl = impl::KSlabHeapImpl; + size_t m_obj_size{}; + uintptr_t m_peak{}; + uintptr_t m_start{}; + uintptr_t m_end{}; private: - Impl m_impl; - uintptr_t m_peak; - uintptr_t m_start; - uintptr_t m_end; - private: - ALWAYS_INLINE Impl *GetImpl() { - return std::addressof(m_impl); - } - ALWAYS_INLINE const Impl *GetImpl() const { - return std::addressof(m_impl); + ALWAYS_INLINE void UpdatePeakImpl(uintptr_t obj) { + static_assert(std::atomic_ref::is_always_lock_free); + std::atomic_ref peak_ref(m_peak); + + const uintptr_t alloc_peak = obj + this->GetObjectSize(); + uintptr_t cur_peak = m_peak; + do { + if (alloc_peak <= cur_peak) { + break; + } + } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak)); } public: - constexpr KSlabHeapBase() : m_impl(), m_peak(0), m_start(0), m_end(0) { MESOSPHERE_ASSERT_THIS(); } + constexpr KSlabHeapBase() = default; ALWAYS_INLINE bool Contains(uintptr_t address) const { return m_start <= address && address < m_end; } - void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) { - MESOSPHERE_ASSERT_THIS(); - + void Initialize(size_t obj_size, void *memory, size_t memory_size) { /* Ensure we don't initialize a slab using null memory. */ MESOSPHERE_ABORT_UNLESS(memory != nullptr); + /* Set our object size. */ + m_obj_size = obj_size; + /* Initialize the base allocator. */ - this->GetImpl()->Initialize(obj_size); + KSlabHeapImpl::Initialize(); /* Set our tracking variables. */ const size_t num_obj = (memory_size / obj_size); m_start = reinterpret_cast(memory); - m_end = m_start + num_obj * obj_size; + m_end = m_start + num_obj * obj_size; m_peak = m_start; /* Free the objects. 
*/ @@ -123,75 +120,91 @@ namespace ams::kern { for (size_t i = 0; i < num_obj; i++) { cur -= obj_size; - this->GetImpl()->Free(cur); + KSlabHeapImpl::Free(cur); } } - size_t GetSlabHeapSize() const { + ALWAYS_INLINE size_t GetSlabHeapSize() const { return (m_end - m_start) / this->GetObjectSize(); } - size_t GetObjectSize() const { - return this->GetImpl()->GetObjectSize(); + ALWAYS_INLINE size_t GetObjectSize() const { + return m_obj_size; } - void *AllocateImpl() { - MESOSPHERE_ASSERT_THIS(); - - void *obj = this->GetImpl()->Allocate(); + ALWAYS_INLINE void *Allocate() { + void *obj = KSlabHeapImpl::Allocate(); /* Track the allocated peak. */ #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING) if (AMS_LIKELY(obj != nullptr)) { - static_assert(std::atomic_ref::is_always_lock_free); - std::atomic_ref peak_ref(m_peak); - - const uintptr_t alloc_peak = reinterpret_cast(obj) + this->GetObjectSize(); - uintptr_t cur_peak = m_peak; - do { - if (alloc_peak <= cur_peak) { - break; + if constexpr (SupportDynamicExpansion) { + if (this->Contains(reinterpret_cast(obj))) { + this->UpdatePeakImpl(reinterpret_cast(obj)); + } else { + this->UpdatePeakImpl(reinterpret_cast(m_end) - this->GetObjectSize()); } - } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak)); + } else { + this->UpdatePeakImpl(reinterpret_cast(obj)); + } } #endif return obj; } - void FreeImpl(void *obj) { - MESOSPHERE_ASSERT_THIS(); - + ALWAYS_INLINE void Free(void *obj) { /* Don't allow freeing an object that wasn't allocated from this heap. 
*/ - MESOSPHERE_ABORT_UNLESS(this->Contains(reinterpret_cast(obj))); + const bool contained = this->Contains(reinterpret_cast(obj)); + if constexpr (SupportDynamicExpansion) { + const bool is_slab = KMemoryLayout::GetSlabRegion().Contains(reinterpret_cast(obj)); + MESOSPHERE_ABORT_UNLESS(contained || is_slab); + } else { + MESOSPHERE_ABORT_UNLESS(contained); + } - this->GetImpl()->Free(obj); + KSlabHeapImpl::Free(obj); } - size_t GetObjectIndexImpl(const void *obj) const { + ALWAYS_INLINE size_t GetObjectIndex(const void *obj) const { + if constexpr (SupportDynamicExpansion) { + if (!this->Contains(reinterpret_cast(obj))) { + return std::numeric_limits::max(); + } + } + return (reinterpret_cast(obj) - m_start) / this->GetObjectSize(); } - size_t GetPeakIndex() const { - return this->GetObjectIndexImpl(reinterpret_cast(m_peak)); + ALWAYS_INLINE size_t GetPeakIndex() const { + return this->GetObjectIndex(reinterpret_cast(m_peak)); } - uintptr_t GetSlabHeapAddress() const { + ALWAYS_INLINE uintptr_t GetSlabHeapAddress() const { return m_start; } - size_t GetNumRemaining() const { + ALWAYS_INLINE size_t GetNumRemaining() const { size_t remaining = 0; /* Only calculate the number of remaining objects under debug configuration. 
*/ #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING) while (true) { - auto *cur = this->GetImpl()->GetHead(); + auto *cur = this->GetHead(); remaining = 0; - while (this->Contains(reinterpret_cast(cur))) { - ++remaining; - cur = cur->next; + if constexpr (SupportDynamicExpansion) { + const auto &slab_region = KMemoryLayout::GetSlabRegion(); + + while (this->Contains(reinterpret_cast(cur)) || slab_region.Contains(reinterpret_cast(cur))) { + ++remaining; + cur = cur->next; + } + } else { + while (this->Contains(reinterpret_cast(cur))) { + ++remaining; + cur = cur->next; + } } if (cur == nullptr) { @@ -204,29 +217,31 @@ namespace ams::kern { } }; - template - class KSlabHeap : public KSlabHeapBase { + template + class KSlabHeap : public KSlabHeapBase { + private: + using BaseHeap = KSlabHeapBase; public: - constexpr KSlabHeap() : KSlabHeapBase() { /* ... */ } + constexpr KSlabHeap() = default; void Initialize(void *memory, size_t memory_size) { - this->InitializeImpl(sizeof(T), memory, memory_size); + BaseHeap::Initialize(sizeof(T), memory, memory_size); } - T *Allocate() { - T *obj = reinterpret_cast(this->AllocateImpl()); + ALWAYS_INLINE T *Allocate() { + T *obj = static_cast(BaseHeap::Allocate()); if (AMS_LIKELY(obj != nullptr)) { std::construct_at(obj); } return obj; } - void Free(T *obj) { - this->FreeImpl(obj); + ALWAYS_INLINE void Free(T *obj) { + BaseHeap::Free(obj); } - size_t GetObjectIndex(const T *obj) const { - return this->GetObjectIndexImpl(obj); + ALWAYS_INLINE size_t GetObjectIndex(const T *obj) const { + return BaseHeap::GetObjectIndex(obj); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp index 2134f830f..6bc2ca5d4 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp @@ -23,12 +23,13 @@ namespace ams::kern { private: friend class KSystemControl; 
private: - static inline bool s_is_debug_mode; - static inline bool s_enable_debug_logging; - static inline bool s_enable_user_exception_handlers; - static inline bool s_enable_debug_memory_fill; - static inline bool s_enable_user_pmu_access; - static inline bool s_enable_kernel_debugging; + static inline constinit bool s_is_debug_mode; + static inline constinit bool s_enable_debug_logging; + static inline constinit bool s_enable_user_exception_handlers; + static inline constinit bool s_enable_debug_memory_fill; + static inline constinit bool s_enable_user_pmu_access; + static inline constinit bool s_enable_kernel_debugging; + static inline constinit bool s_enable_dynamic_resource_limits; private: static ALWAYS_INLINE void SetIsDebugMode(bool en) { s_is_debug_mode = en; } static ALWAYS_INLINE void EnableDebugLogging(bool en) { s_enable_debug_logging = en; } @@ -36,6 +37,7 @@ namespace ams::kern { static ALWAYS_INLINE void EnableDebugMemoryFill(bool en) { s_enable_debug_memory_fill = en; } static ALWAYS_INLINE void EnableUserPmuAccess(bool en) { s_enable_user_pmu_access = en; } static ALWAYS_INLINE void EnableKernelDebugging(bool en) { s_enable_kernel_debugging = en; } + static ALWAYS_INLINE void EnableDynamicResourceLimits(bool en) { s_enable_dynamic_resource_limits = en; } public: static ALWAYS_INLINE bool IsDebugMode() { return s_is_debug_mode; } static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_enable_debug_logging; } @@ -43,6 +45,7 @@ namespace ams::kern { static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_enable_debug_memory_fill; } static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_enable_user_pmu_access; } static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_enable_kernel_debugging; } + static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled() { return s_enable_dynamic_resource_limits; } }; } \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_unused_slab_memory.hpp 
b/libraries/libmesosphere/include/mesosphere/kern_k_unused_slab_memory.hpp new file mode 100644 index 000000000..e1e815545 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_unused_slab_memory.hpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include <mesosphere/kern_common.hpp> +#include <mesosphere/kern_k_typed_address.hpp> + +namespace ams::kern { + + /* Utilities to allocate/free memory from the "unused" gaps between slab heaps. */ + /* See KTargetSystem::IsDynamicResourceLimitsEnabled() usage for more context.
*/ + KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment); + void FreeUnusedSlabMemory(KVirtualAddress address, size_t size); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp index 3d7fdffe0..679f9ee29 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp @@ -63,14 +63,21 @@ namespace ams::kern { static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; static constexpr size_t BlockInfoSlabHeapSize = 4000; + static constexpr size_t ReservedDynamicPageCount = 70; private: static State s_state; static KResourceLimit s_system_resource_limit; static KMemoryManager s_memory_manager; - static KPageTableManager s_page_table_manager; + static KPageTableSlabHeap s_page_table_heap; + static KMemoryBlockSlabHeap s_app_memory_block_heap; + static KMemoryBlockSlabHeap s_sys_memory_block_heap; + static KBlockInfoSlabHeap s_block_info_heap; + static KPageTableManager s_app_page_table_manager; + static KPageTableManager s_sys_page_table_manager; static KMemoryBlockSlabManager s_app_memory_block_manager; static KMemoryBlockSlabManager s_sys_memory_block_manager; - static KBlockInfoManager s_block_info_manager; + static KBlockInfoManager s_app_block_info_manager; + static KBlockInfoManager s_sys_block_info_manager; static KSupervisorPageTable s_supervisor_page_table; static KUnsafeMemory s_unsafe_memory; static KWorkerTaskManager s_worker_task_managers[KWorkerTaskManager::WorkerType_Count]; @@ -130,12 +137,20 @@ namespace ams::kern { return s_sys_memory_block_manager; } - static ALWAYS_INLINE KBlockInfoManager &GetBlockInfoManager() { - return s_block_info_manager; + static ALWAYS_INLINE KBlockInfoManager &GetApplicationBlockInfoManager() { + return s_app_block_info_manager; } - static ALWAYS_INLINE KPageTableManager 
&GetPageTableManager() { - return s_page_table_manager; + static ALWAYS_INLINE KBlockInfoManager &GetSystemBlockInfoManager() { + return s_sys_block_info_manager; + } + + static ALWAYS_INLINE KPageTableManager &GetApplicationPageTableManager() { + return s_app_page_table_manager; + } + + static ALWAYS_INLINE KPageTableManager &GetSystemPageTableManager() { + return s_sys_page_table_manager; } static ALWAYS_INLINE KSupervisorPageTable &GetKernelPageTable() { diff --git a/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp index 8a6b96aa7..c5ddad1e1 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp @@ -18,15 +18,16 @@ #include #include #include +#include namespace ams::kern { - template + template class KSlabAllocated { private: - static inline KSlabHeap s_slab_heap; + static constinit inline KSlabHeap s_slab_heap; public: - constexpr KSlabAllocated() { /* ... 
*/ } + constexpr KSlabAllocated() = default; size_t GetSlabIndex() const { return s_slab_heap.GetIndex(static_cast(this)); @@ -36,14 +37,25 @@ namespace ams::kern { s_slab_heap.Initialize(memory, memory_size); } - static ALWAYS_INLINE Derived *Allocate() { + static Derived *Allocate() { return s_slab_heap.Allocate(); } - static ALWAYS_INLINE void Free(Derived *obj) { + static void Free(Derived *obj) { s_slab_heap.Free(obj); } + template::type> + static Derived *AllocateFromUnusedSlabMemory() { + static_assert(Enable == SupportDynamicExpansion); + + Derived * const obj = GetPointer(AllocateUnusedSlabMemory(sizeof(Derived), alignof(Derived))); + if (AMS_LIKELY(obj != nullptr)) { + std::construct_at(obj); + } + return obj; + } + static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); } static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); } static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); } @@ -52,12 +64,12 @@ namespace ams::kern { static size_t GetNumRemaining() { return s_slab_heap.GetNumRemaining(); } }; - template + template class KAutoObjectWithSlabHeapAndContainer : public Base { static_assert(std::is_base_of::value); private: - static inline KSlabHeap s_slab_heap; - static inline KAutoObjectWithListContainer s_container; + static constinit inline KSlabHeap s_slab_heap; + static constinit inline KAutoObjectWithListContainer s_container; private: static ALWAYS_INLINE Derived *Allocate() { return s_slab_heap.Allocate(); @@ -73,7 +85,7 @@ namespace ams::kern { ALWAYS_INLINE ~ListAccessor() { /* ... */ } }; public: - constexpr KAutoObjectWithSlabHeapAndContainer() : Base() { /* ... 
*/ } + constexpr KAutoObjectWithSlabHeapAndContainer() = default; virtual void Destroy() override { const bool is_initialized = this->IsInitialized(); @@ -109,6 +121,18 @@ namespace ams::kern { return obj; } + template::type> + static Derived *CreateFromUnusedSlabMemory() { + static_assert(Enable == SupportDynamicExpansion); + + Derived * const obj = GetPointer(AllocateUnusedSlabMemory(sizeof(Derived), alignof(Derived))); + if (AMS_LIKELY(obj != nullptr)) { + std::construct_at(obj); + KAutoObject::Create(obj); + } + return obj; + } + static void Register(Derived *obj) { return s_container.Register(obj); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index a6a025696..2b8898eed 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -166,7 +166,7 @@ namespace ams::kern::arch::arm64 { Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) { /* Initialize basic fields. */ m_asid = 0; - m_manager = std::addressof(Kernel::GetPageTableManager()); + m_manager = std::addressof(Kernel::GetSystemPageTableManager()); /* Allocate a page for ttbr. */ const u64 asid_tag = (static_cast(m_asid) << 48ul); diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp index 6c4e79a02..41ae80e01 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp @@ -650,11 +650,11 @@ namespace ams::kern::board::nintendo::nx { g_memory_controller_address = KMemoryLayout::GetDevicePhysicalAddress(KMemoryRegionType_MemoryController); /* Allocate a page to use as a reserved/no device table. 
*/ - const KVirtualAddress table_virt_addr = Kernel::GetPageTableManager().Allocate(); + const KVirtualAddress table_virt_addr = Kernel::GetSystemPageTableManager().Allocate(); MESOSPHERE_ABORT_UNLESS(table_virt_addr != Null); const KPhysicalAddress table_phys_addr = GetPageTablePhysicalAddress(table_virt_addr); MESOSPHERE_ASSERT(IsValidPhysicalAddress(table_phys_addr)); - Kernel::GetPageTableManager().Open(table_virt_addr, 1); + Kernel::GetSystemPageTableManager().Open(table_virt_addr, 1); /* Clear the page and save it. */ /* NOTE: Nintendo does not check the result of StoreDataCache. */ @@ -779,7 +779,7 @@ namespace ams::kern::board::nintendo::nx { const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize; /* Get the page table manager. */ - auto &ptm = Kernel::GetPageTableManager(); + auto &ptm = Kernel::GetSystemPageTableManager(); /* Clear the tables. */ static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize); @@ -840,7 +840,7 @@ namespace ams::kern::board::nintendo::nx { void KDevicePageTable::Finalize() { /* Get the page table manager. */ - auto &ptm = Kernel::GetPageTableManager(); + auto &ptm = Kernel::GetSystemPageTableManager(); /* Detach from all devices. */ { @@ -1017,7 +1017,7 @@ namespace ams::kern::board::nintendo::nx { /* Get the memory manager and page table manager. */ KMemoryManager &mm = Kernel::GetMemoryManager(); - KPageTableManager &ptm = Kernel::GetPageTableManager(); + KPageTableManager &ptm = Kernel::GetSystemPageTableManager(); /* Cache permissions. */ const bool read = (device_perm & ams::svc::MemoryPermission_Read) != 0; @@ -1181,10 +1181,10 @@ namespace ams::kern::board::nintendo::nx { /* Get the memory manager and page table manager. */ KMemoryManager &mm = Kernel::GetMemoryManager(); - KPageTableManager &ptm = Kernel::GetPageTableManager(); + KPageTableManager &ptm = Kernel::GetSystemPageTableManager(); /* Make a page group for the pages we're closing. 
*/ - KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager())); + KPageGroup pg(std::addressof(Kernel::GetSystemBlockInfoManager())); /* Walk the directory. */ u64 remaining = size; diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp index 8768e0fa1..220857520 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -459,6 +459,7 @@ namespace ams::kern::board::nintendo::nx { KTargetSystem::EnableDebugMemoryFill(kernel_config.Get()); KTargetSystem::EnableUserExceptionHandlers(kernel_config.Get()); + KTargetSystem::EnableDynamicResourceLimits(!kernel_config.Get()); KTargetSystem::EnableUserPmuAccess(kernel_config.Get()); g_call_smc_on_panic = kernel_config.Get(); diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp index 757323a39..05bb3a1a0 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp @@ -80,14 +80,15 @@ namespace ams::kern::board::nintendo::nx::smc { }; struct KernelConfiguration { - using DebugFillMemory = util::BitPack32::Field<0, 1, bool>; - using EnableUserExceptionHandlers = util::BitPack32::Field; - using EnableUserPmuAccess = util::BitPack32::Field; - using IncreaseThreadResourceLimit = util::BitPack32::Field; - using Reserved4 = util::BitPack32::Field; - using UseSecureMonitorPanicCall = util::BitPack32::Field; - using Reserved9 = util::BitPack32::Field; - using MemorySize = util::BitPack32::Field; + using DebugFillMemory = util::BitPack32::Field<0, 1, bool>; + using EnableUserExceptionHandlers = util::BitPack32::Field; + using EnableUserPmuAccess = util::BitPack32::Field; + using 
IncreaseThreadResourceLimit = util::BitPack32::Field; + using DisableDynamicResourceLimits = util::BitPack32::Field; + using Reserved5 = util::BitPack32::Field; + using UseSecureMonitorPanicCall = util::BitPack32::Field; + using Reserved9 = util::BitPack32::Field; + using MemorySize = util::BitPack32::Field; }; enum UserRebootType { diff --git a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp index 733abc300..c52eff90a 100644 --- a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp +++ b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp @@ -173,8 +173,9 @@ namespace ams::kern::init { } void InitializeSlabHeaps() { - /* Get the start of the slab region, since that's where we'll be working. */ - KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress(); + /* Get the slab region, since that's where we'll be working. */ + const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion(); + KVirtualAddress address = slab_region.GetAddress(); /* Initialize slab type array to be in sorted order. */ KSlabType slab_types[KSlabType_Count]; @@ -202,13 +203,21 @@ namespace ams::kern::init { } } + /* Track the gaps, so that we can free them to the unused slab tree. */ + KVirtualAddress gap_start = address; + size_t gap_size = 0; + for (size_t i = 0; i < util::size(slab_types); i++) { /* Add the random gap to the address. */ - address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + address += cur_gap; + gap_size += cur_gap; - #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ - case KSlabType_##NAME: \ - address = InitializeSlabHeap(address, COUNT); \ + #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ + case KSlabType_##NAME: \ + if (COUNT > 0) { \ + address = InitializeSlabHeap(address, COUNT); \ + } \ break; /* Initialize the slabheap. 
*/ @@ -218,7 +227,17 @@ namespace ams::kern::init { /* If we somehow get an invalid type, abort. */ MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } + + /* If we've hit the end of a gap, free it. */ + if (gap_start + gap_size != address) { + FreeUnusedSlabMemory(gap_start, gap_size); + gap_start = address; + gap_size = 0; + } } + + /* Free the end of the slab region. */ + FreeUnusedSlabMemory(gap_start, gap_size + (slab_region.GetEndAddress() - GetInteger(address))); } } \ No newline at end of file diff --git a/libraries/libmesosphere/source/kern_initial_process.cpp b/libraries/libmesosphere/source/kern_initial_process.cpp index 2e1507a43..af198ee67 100644 --- a/libraries/libmesosphere/source/kern_initial_process.cpp +++ b/libraries/libmesosphere/source/kern_initial_process.cpp @@ -128,13 +128,13 @@ namespace ams::kern { KProcess *new_process = nullptr; { /* Make page groups to represent the data. */ - KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager())); - KPageGroup workaround_pg(std::addressof(Kernel::GetBlockInfoManager())); + KPageGroup pg(std::addressof(Kernel::GetSystemBlockInfoManager())); + KPageGroup workaround_pg(std::addressof(Kernel::GetSystemBlockInfoManager())); /* Populate the page group to represent the data. */ { /* Allocate the previously unreserved pages. */ - KPageGroup unreserve_pg(std::addressof(Kernel::GetBlockInfoManager())); + KPageGroup unreserve_pg(std::addressof(Kernel::GetSystemBlockInfoManager())); MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront))); /* Add the previously reserved pages. 
*/ diff --git a/libraries/libmesosphere/source/kern_k_client_port.cpp b/libraries/libmesosphere/source/kern_k_client_port.cpp index 76ad3a373..ec0c9ea9c 100644 --- a/libraries/libmesosphere/source/kern_k_client_port.cpp +++ b/libraries/libmesosphere/source/kern_k_client_port.cpp @@ -62,11 +62,46 @@ namespace ams::kern { Result KClientPort::CreateSession(KClientSession **out) { MESOSPHERE_ASSERT_THIS(); + /* Declare the session we're going to allocate. */ + KSession *session; + /* Reserve a new session from the resource limit. */ KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax); - R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached()); + if (session_reservation.Succeeded()) { + /* Allocate a session normally. */ + session = KSession::Create(); + } else { + /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */ + R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached()); + R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached()); + + /* Try to allocate a session from unused slab memory. */ + session = KSession::CreateFromUnusedSlabMemory(); + R_UNLESS(session != nullptr, svc::ResultLimitReached()); + + /* Ensure that if we fail to allocate our session requests, we close the session we created. */ + auto session_guard = SCOPE_GUARD { session->Close(); }; + { + /* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */ + for (size_t i = 0; i < 2; ++i) { + KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory(); + R_UNLESS(request != nullptr, svc::ResultLimitReached()); + + request->Close(); + } + } + session_guard.Cancel(); + + /* We successfully allocated a session, so add the object we allocated to the resource limit. 
*/ + Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1); + } + + /* Check that we successfully created a session. */ + R_UNLESS(session != nullptr, svc::ResultOutOfResource()); + /* Update the session counts. */ + auto count_guard = SCOPE_GUARD { session->Close(); }; { /* Atomically increment the number of sessions. */ s32 new_sessions; @@ -90,18 +125,7 @@ namespace ams::kern { } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); } } - - /* Create a new session. */ - KSession *session = KSession::Create(); - if (session == nullptr) { - /* Decrement the session count. */ - const auto prev = m_num_sessions--; - if (prev == m_max_sessions) { - this->NotifyAvailable(); - } - - return svc::ResultOutOfResource(); - } + count_guard.Cancel(); /* Initialize the session. */ session->Initialize(this, m_parent->GetName()); @@ -128,11 +152,32 @@ namespace ams::kern { Result KClientPort::CreateLightSession(KLightClientSession **out) { MESOSPHERE_ASSERT_THIS(); + /* Declare the session we're going to allocate. */ + KLightSession *session; + /* Reserve a new session from the resource limit. */ KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax); - R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached()); + if (session_reservation.Succeeded()) { + /* Allocate a session normally. */ + session = KLightSession::Create(); + } else { + /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */ + R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached()); + R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached()); + + /* Try to allocate a session from unused slab memory. 
*/ + session = KLightSession::CreateFromUnusedSlabMemory(); + R_UNLESS(session != nullptr, svc::ResultLimitReached()); + + /* We successfully allocated a session, so add the object we allocated to the resource limit. */ + Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1); + } + + /* Check that we successfully created a session. */ + R_UNLESS(session != nullptr, svc::ResultOutOfResource()); /* Update the session counts. */ + auto count_guard = SCOPE_GUARD { session->Close(); }; { /* Atomically increment the number of sessions. */ s32 new_sessions; @@ -156,18 +201,7 @@ namespace ams::kern { } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); } } - - /* Create a new session. */ - KLightSession *session = KLightSession::Create(); - if (session == nullptr) { - /* Decrement the session count. */ - const auto prev = m_num_sessions--; - if (prev == m_max_sessions) { - this->NotifyAvailable(); - } - - return svc::ResultOutOfResource(); - } + count_guard.Cancel(); /* Initialize the session. */ session->Initialize(this, m_parent->GetName()); diff --git a/libraries/libmesosphere/source/kern_k_dump_object.cpp b/libraries/libmesosphere/source/kern_k_dump_object.cpp index 73e7af37a..024780003 100644 --- a/libraries/libmesosphere/source/kern_k_dump_object.cpp +++ b/libraries/libmesosphere/source/kern_k_dump_object.cpp @@ -369,14 +369,14 @@ namespace ams::kern::KDumpObject { /* KBlockInfo slab. */ { MESOSPHERE_RELEASE_LOG("KBlockInfo\n"); - auto &manager = Kernel::GetBlockInfoManager(); + auto &manager = Kernel::GetSystemBlockInfoManager(); MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount()); } /* Page Table slab. 
*/ { MESOSPHERE_RELEASE_LOG("Page Table\n"); - auto &manager = Kernel::GetPageTableManager(); + auto &manager = Kernel::GetSystemPageTableManager(); MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount()); } } diff --git a/libraries/libmesosphere/source/kern_k_event.cpp b/libraries/libmesosphere/source/kern_k_event.cpp index 7ff862a74..9a49f5ac6 100644 --- a/libraries/libmesosphere/source/kern_k_event.cpp +++ b/libraries/libmesosphere/source/kern_k_event.cpp @@ -37,7 +37,7 @@ namespace ams::kern { void KEvent::Finalize() { MESOSPHERE_ASSERT_THIS(); - KAutoObjectWithSlabHeapAndContainer::Finalize(); + KAutoObjectWithSlabHeapAndContainer::Finalize(); } Result KEvent::Signal() { diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp index 36434ee36..47e124a56 100644 --- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -37,6 +37,7 @@ namespace ams::kern { void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) { /* Clear the management region to zero. 
*/ + const KVirtualAddress management_region_end = management_region + management_region_size; std::memset(GetVoidPointer(management_region), 0, management_region_size); diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index c331bcea8..ecbbb0cfd 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -106,7 +106,7 @@ namespace ams::kern { m_mapped_ipc_server_memory = 0; m_memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager()); - m_block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); + m_block_info_manager = std::addressof(Kernel::GetSystemBlockInfoManager()); m_resource_limit = std::addressof(Kernel::GetSystemResourceLimit()); m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp index ba1defba9..b69e1eeeb 100644 --- a/libraries/libmesosphere/source/kern_k_process.cpp +++ b/libraries/libmesosphere/source/kern_k_process.cpp @@ -260,8 +260,8 @@ namespace ams::kern { const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0; const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0; auto *mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager()); - auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); - auto *pt_manager = std::addressof(Kernel::GetPageTableManager()); + auto *block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager()); + auto *pt_manager = std::addressof(is_app ? 
Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager()); R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager, res_limit)); } auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); }; @@ -326,12 +326,17 @@ namespace ams::kern { MESOSPHERE_ASSERT(m_system_resource_address != Null); m_system_resource_num_pages = system_resource_num_pages; - /* Initialize managers. */ - const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(system_resource_size), PageSize); + /* Initialize slab heaps. */ + const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(system_resource_size), PageSize); m_dynamic_page_manager.Initialize(m_system_resource_address + rc_size, system_resource_size - rc_size); - m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), GetPointer(m_system_resource_address)); - m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager)); - m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager)); + m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, GetPointer(m_system_resource_address)); + m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0); + m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0); + + /* Initialize managers. 
*/ + m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_page_table_heap)); + m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_memory_block_heap)); + m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_block_info_heap)); mem_block_manager = std::addressof(m_memory_block_slab_manager); block_info_manager = std::addressof(m_block_info_manager); @@ -339,8 +344,8 @@ namespace ams::kern { } else { const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication); mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager()); - block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); - pt_manager = std::addressof(Kernel::GetPageTableManager()); + block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager()); + pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager()); } /* Ensure we don't leak any secure memory we allocated. 
*/ diff --git a/libraries/libmesosphere/source/kern_k_resource_limit.cpp b/libraries/libmesosphere/source/kern_k_resource_limit.cpp index 79c2c7558..0210d8852 100644 --- a/libraries/libmesosphere/source/kern_k_resource_limit.cpp +++ b/libraries/libmesosphere/source/kern_k_resource_limit.cpp @@ -49,7 +49,8 @@ namespace ams::kern { KScopedLightLock lk(m_lock); value = m_limit_values[which]; MESOSPHERE_ASSERT(value >= 0); - MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]); + MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]); MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); } @@ -64,7 +65,8 @@ namespace ams::kern { KScopedLightLock lk(m_lock); value = m_current_values[which]; MESOSPHERE_ASSERT(value >= 0); - MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]); + MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]); MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); } @@ -79,7 +81,8 @@ namespace ams::kern { KScopedLightLock lk(m_lock); value = m_peak_values[which]; MESOSPHERE_ASSERT(value >= 0); - MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]); + MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]); MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); } @@ -93,7 +96,8 @@ namespace ams::kern { { KScopedLightLock lk(m_lock); MESOSPHERE_ASSERT(m_current_values[which] >= 0); - MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]); + MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]); + MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]); MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]); value = m_limit_values[which] - m_current_values[which]; } @@ -113,6 
+117,37 @@ namespace ams::kern { return ResultSuccess(); } + void KResourceLimit::Add(ams::svc::LimitableResource which, s64 value) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KTargetSystem::IsDynamicResourceLimitsEnabled()); + + KScopedLightLock lk(m_lock); + + /* Check that this is a true increase. */ + MESOSPHERE_ABORT_UNLESS(value > 0); + + /* Check that we can perform an increase. */ + MESOSPHERE_ABORT_UNLESS(m_current_values[which] <= m_peak_values[which]); + MESOSPHERE_ABORT_UNLESS(m_peak_values[which] <= m_limit_values[which]); + MESOSPHERE_ABORT_UNLESS(m_current_hints[which] <= m_current_values[which]); + + /* Check that the increase doesn't cause an overflow. */ + const auto increased_limit = m_limit_values[which] + value; + const auto increased_current = m_current_values[which] + value; + const auto increased_hint = m_current_hints[which] + value; + MESOSPHERE_ABORT_UNLESS(m_limit_values[which] < increased_limit); + MESOSPHERE_ABORT_UNLESS(m_current_values[which] < increased_current); + MESOSPHERE_ABORT_UNLESS(m_current_hints[which] < increased_hint); + + /* Add the value. */ + m_limit_values[which] = increased_limit; + m_current_values[which] = increased_current; + m_current_hints[which] = increased_hint; + + /* Update our peak. 
*/ + m_peak_values[which] = std::max(m_peak_values[which], increased_current); + } + bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value) { return this->Reserve(which, value, KHardwareTimer::GetTick() + DefaultTimeout); } diff --git a/libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp b/libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp new file mode 100644 index 000000000..7ccf1cf65 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + class KUnusedSlabMemory : public util::IntrusiveRedBlackTreeBaseNode { + NON_COPYABLE(KUnusedSlabMemory); + NON_MOVEABLE(KUnusedSlabMemory); + private: + size_t m_size; + public: + struct RedBlackKeyType { + size_t m_size; + + constexpr ALWAYS_INLINE size_t GetSize() const { + return m_size; + } + }; + + template requires (std::same_as || std::same_as) + static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KUnusedSlabMemory &rhs) { + if (lhs.GetSize() < rhs.GetSize()) { + return -1; + } else { + return 1; + } + } + public: + constexpr KUnusedSlabMemory(size_t size) : m_size(size) { /* ... 
*/ } + + constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return reinterpret_cast(this); } + constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; } + + }; + static_assert(std::is_trivially_destructible::value); + + using KUnusedSlabMemoryTree = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + + constinit KLightLock g_unused_slab_memory_lock; + constinit KUnusedSlabMemoryTree g_unused_slab_memory_tree; + + } + + KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment) { + /* Acquire exclusive access to the memory tree. */ + KScopedLightLock lk(g_unused_slab_memory_lock); + + /* Adjust size and alignment. */ + size = std::max(size, sizeof(KUnusedSlabMemory)); + alignment = std::max(alignment, alignof(KUnusedSlabMemory)); + + /* Find the smallest block which fits our allocation. */ + KUnusedSlabMemory *best_fit = std::addressof(*g_unused_slab_memory_tree.nfind_key({ size - 1 })); + + /* Ensure that the chunk is valid. */ + size_t prefix_waste; + KVirtualAddress alloc_start; + KVirtualAddress alloc_last; + KVirtualAddress alloc_end; + KVirtualAddress chunk_last; + KVirtualAddress chunk_end; + while (true) { + /* Check that we still have a chunk satisfying our size requirement. */ + if (AMS_UNLIKELY(best_fit == nullptr)) { + return Null; + } + + /* Determine where the actual allocation would start. */ + alloc_start = util::AlignUp(GetInteger(best_fit->GetAddress()), alignment); + if (AMS_LIKELY(alloc_start >= best_fit->GetAddress())) { + prefix_waste = alloc_start - best_fit->GetAddress(); + alloc_end = alloc_start + size; + alloc_last = alloc_end - 1; + + /* Check that the allocation remains in bounds. */ + if (alloc_start <= alloc_last) { + chunk_end = best_fit->GetAddress() + best_fit->GetSize(); + chunk_last = chunk_end - 1; + if (AMS_LIKELY(alloc_last <= chunk_last)) { + break; + } + } + } + + /* Check the next smallest block. */ + best_fit = best_fit->GetNext(); + } + + /* Remove the chunk we selected from the tree. 
*/ + g_unused_slab_memory_tree.erase(g_unused_slab_memory_tree.iterator_to(*best_fit)); + std::destroy_at(best_fit); + + /* If there's enough prefix waste due to alignment for a new chunk, insert it into the tree. */ + if (prefix_waste >= sizeof(KUnusedSlabMemory)) { + std::construct_at(best_fit, prefix_waste); + g_unused_slab_memory_tree.insert(*best_fit); + } + + /* If there's enough suffix waste after the allocation for a new chunk, insert it into the tree. */ + if (alloc_last < alloc_end + sizeof(KUnusedSlabMemory) - 1 && alloc_end + sizeof(KUnusedSlabMemory) - 1 <= chunk_last) { + KUnusedSlabMemory *suffix_chunk = GetPointer(alloc_end); + std::construct_at(suffix_chunk, chunk_end - alloc_end); + g_unused_slab_memory_tree.insert(*suffix_chunk); + } + + /* Return the allocated memory. */ + return alloc_start; + } + + void FreeUnusedSlabMemory(KVirtualAddress address, size_t size) { + /* NOTE: This is called only during initialization, so we don't need exclusive access. */ + /* Nintendo doesn't acquire the lock here, either. */ + + /* Check that there's anything at all for us to free. */ + if (AMS_UNLIKELY(size == 0)) { + return; + } + + /* Determine the start of the block. */ + const KVirtualAddress block_start = util::AlignUp(GetInteger(address), alignof(KUnusedSlabMemory)); + + /* Check that there's space for a KUnusedSlabMemory to exist. */ + if (AMS_UNLIKELY(std::numeric_limits::max() - sizeof(KUnusedSlabMemory) < GetInteger(block_start))) { + return; + } + + /* Determine the end of the block region. */ + const KVirtualAddress block_end = util::AlignDown(GetInteger(address) + size, alignof(KUnusedSlabMemory)); + + /* Check that the block remains within bounds. */ + if (AMS_UNLIKELY(block_start + sizeof(KUnusedSlabMemory) - 1 > block_end - 1)){ + return; + } + + /* Create the block. */ + KUnusedSlabMemory *block = GetPointer(block_start); + std::construct_at(block, GetInteger(block_end) - GetInteger(block_start)); + + /* Insert the block into the tree. 
*/ + g_unused_slab_memory_tree.insert(*block); + } + +} diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp index 30f5c1d38..0cc9f855e 100644 --- a/libraries/libmesosphere/source/kern_kernel.cpp +++ b/libraries/libmesosphere/source/kern_kernel.cpp @@ -66,15 +66,11 @@ namespace ams::kern { void Kernel::InitializeResourceManagers(KVirtualAddress address, size_t size) { /* Ensure that the buffer is suitable for our use. */ - //const size_t app_size = ApplicationMemoryBlockSlabHeapSize * sizeof(KMemoryBlock); - //const size_t sys_size = SystemMemoryBlockSlabHeapSize * sizeof(KMemoryBlock); - //const size_t info_size = BlockInfoSlabHeapSize * sizeof(KBlockInfo); - //const size_t fixed_size = util::AlignUp(app_size + sys_size + info_size, PageSize); MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), PageSize)); MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); /* Ensure that we have space for our reference counts. */ - const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(size), PageSize); + const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize); MESOSPHERE_ABORT_UNLESS(rc_size < size); size -= rc_size; @@ -82,13 +78,28 @@ namespace ams::kern { g_resource_manager_page_manager.Initialize(address, size); /* Initialize the fixed-size slabheaps. 
*/ - s_app_memory_block_manager.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize); - s_sys_memory_block_manager.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize); - s_block_info_manager.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize); + s_app_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize); + s_sys_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize); + s_block_info_heap.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize); - /* Reserve all remaining pages for the page table manager. */ - const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed(); - s_page_table_manager.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer(address + size)); + /* Reserve all but a fixed number of remaining pages for the page table heap. */ + const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() - ReservedDynamicPageCount; + s_page_table_heap.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer(address + size)); + + /* Setup the slab managers. */ + KDynamicPageManager * const app_dynamic_page_manager = nullptr; + KDynamicPageManager * const sys_dynamic_page_manager = KTargetSystem::IsDynamicResourceLimitsEnabled() ? 
std::addressof(g_resource_manager_page_manager) : nullptr; + s_app_memory_block_manager.Initialize(app_dynamic_page_manager, std::addressof(s_app_memory_block_heap)); + s_sys_memory_block_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_sys_memory_block_heap)); + + s_app_block_info_manager.Initialize(app_dynamic_page_manager, std::addressof(s_block_info_heap)); + s_sys_block_info_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_block_info_heap)); + + s_app_page_table_manager.Initialize(app_dynamic_page_manager, std::addressof(s_page_table_heap)); + s_sys_page_table_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_page_table_heap)); + + /* Check that we have the correct number of dynamic pages available. */ + MESOSPHERE_ABORT_UNLESS(g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() == ReservedDynamicPageCount); } void Kernel::PrintLayout() { diff --git a/libraries/libmesosphere/source/svc/kern_svc_event.cpp b/libraries/libmesosphere/source/svc/kern_svc_event.cpp index 7ab2780ce..c458d6631 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_event.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_event.cpp @@ -60,12 +60,28 @@ namespace ams::kern::svc { auto &process = GetCurrentProcess(); auto &handle_table = process.GetHandleTable(); + /* Declare the event we're going to allocate. */ + KEvent *event; + /* Reserve a new event from the process resource limit. */ KScopedResourceReservation event_reservation(std::addressof(process), ams::svc::LimitableResource_EventCountMax); - R_UNLESS(event_reservation.Succeeded(), svc::ResultLimitReached()); + if (event_reservation.Succeeded()) { + /* Allocate an event normally. */ + event = KEvent::Create(); + } else { + /* We couldn't reserve an event. Check that we support dynamically expanding the resource limit. 
*/ + R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached()); + R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached()); - /* Create a new event. */ - KEvent *event = KEvent::Create(); + /* Try to allocate an event from unused slab memory. */ + event = KEvent::CreateFromUnusedSlabMemory(); + R_UNLESS(event != nullptr, svc::ResultLimitReached()); + + /* We successfully allocated an event, so add the object we allocated to the resource limit. */ + Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_EventCountMax, 1); + } + + /* Check that we successfully created an event. */ R_UNLESS(event != nullptr, svc::ResultOutOfResource()); /* Initialize the event. */ diff --git a/libraries/libmesosphere/source/svc/kern_svc_session.cpp b/libraries/libmesosphere/source/svc/kern_svc_session.cpp index f55eea5a1..4d5a04c90 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_session.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_session.cpp @@ -27,12 +27,44 @@ namespace ams::kern::svc { auto &process = GetCurrentProcess(); auto &handle_table = process.GetHandleTable(); + /* Declare the session we're going to allocate. */ + T *session; + /* Reserve a new session from the process resource limit. */ KScopedResourceReservation session_reservation(std::addressof(process), ams::svc::LimitableResource_SessionCountMax); - R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached()); + if (session_reservation.Succeeded()) { + /* Allocate a session normally. */ + session = T::Create(); + } else { + /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */ + R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached()); + R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached()); - /* Create a new session. 
*/ - T *session = T::Create(); + /* Try to allocate a session from unused slab memory. */ + session = T::CreateFromUnusedSlabMemory(); + R_UNLESS(session != nullptr, svc::ResultLimitReached()); + + /* If we're creating a KSession, we want to add two KSessionRequests to the heap, to prevent request exhaustion. */ + /* NOTE: Nintendo checks if session->DynamicCast() != nullptr, but there's no reason to not do this statically. */ + if constexpr (std::same_as) { + /* Ensure that if we fail to allocate our session requests, we close the session we created. */ + auto session_guard = SCOPE_GUARD { session->Close(); }; + { + for (size_t i = 0; i < 2; ++i) { + KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory(); + R_UNLESS(request != nullptr, svc::ResultLimitReached()); + + request->Close(); + } + } + session_guard.Cancel(); + } + + /* We successfully allocated a session, so add the object we allocated to the resource limit. */ + Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1); + } + + /* Check that we successfully created a session. */ R_UNLESS(session != nullptr, svc::ResultOutOfResource()); /* Initialize the session. 
*/ diff --git a/mesosphere/kernel/source/kern_kernel_instantiations.cpp b/mesosphere/kernel/source/kern_kernel_instantiations.cpp index d64901947..050e94954 100644 --- a/mesosphere/kernel/source/kern_kernel_instantiations.cpp +++ b/mesosphere/kernel/source/kern_kernel_instantiations.cpp @@ -21,10 +21,6 @@ namespace ams::kern { constinit Kernel::State Kernel::s_state = Kernel::State::Invalid; constinit KResourceLimit Kernel::s_system_resource_limit; KMemoryManager Kernel::s_memory_manager; - constinit KPageTableManager Kernel::s_page_table_manager; - constinit KMemoryBlockSlabManager Kernel::s_app_memory_block_manager; - constinit KMemoryBlockSlabManager Kernel::s_sys_memory_block_manager; - constinit KBlockInfoManager Kernel::s_block_info_manager; constinit KSupervisorPageTable Kernel::s_supervisor_page_table; constinit KUnsafeMemory Kernel::s_unsafe_memory; constinit KWorkerTaskManager Kernel::s_worker_task_managers[KWorkerTaskManager::WorkerType_Count]; @@ -33,6 +29,17 @@ namespace ams::kern { constinit KInterruptTaskManager Kernel::s_interrupt_task_managers[cpu::NumCores]; constinit KHardwareTimer Kernel::s_hardware_timers[cpu::NumCores]; + constinit KPageTableSlabHeap Kernel::s_page_table_heap; + constinit KMemoryBlockSlabHeap Kernel::s_app_memory_block_heap; + constinit KMemoryBlockSlabHeap Kernel::s_sys_memory_block_heap; + constinit KBlockInfoSlabHeap Kernel::s_block_info_heap; + constinit KPageTableManager Kernel::s_app_page_table_manager; + constinit KPageTableManager Kernel::s_sys_page_table_manager; + constinit KMemoryBlockSlabManager Kernel::s_app_memory_block_manager; + constinit KMemoryBlockSlabManager Kernel::s_sys_memory_block_manager; + constinit KBlockInfoManager Kernel::s_app_block_info_manager; + constinit KBlockInfoManager Kernel::s_sys_block_info_manager; + namespace { constinit std::array g_main_threads;