kern: KMemoryManager::Allocate -> AllocateAndOpen

Michael Scire 2020-12-01 06:01:44 -08:00 committed by SciresM
parent 3bce008170
commit cc11d452e5
8 changed files with 85 additions and 44 deletions
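
Summary: KMemoryManager's allocation entry points (AllocateContinuous, Allocate, AllocateForProcess) become AllocateAndOpenContinuous, AllocateAndOpen, and AllocateAndOpenForProcess, and now open the first reference to the pages they return, so call sites drop their separate Open calls. A minimal caller-side sketch of the pattern (illustrative variables, not copied from any single call site):

    {   /* Before: allocate, then open references by hand. */
        KPageGroup pg(block_info_manager);
        R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, option));
        pg.Open();
        ON_SCOPE_EXIT { pg.Close(); };
    }

    {   /* After: the allocation itself opens the first reference. */
        KPageGroup pg(block_info_manager);
        R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, option));
        ON_SCOPE_EXIT { pg.Close(); };
    }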

[file 1 of 8]

@@ -96,12 +96,23 @@ namespace ams::kern {
                 constexpr Impl *GetNext() const { return this->next; }
                 constexpr Impl *GetPrev() const { return this->prev; }
 
+                void OpenFirst(KVirtualAddress address, size_t num_pages) {
+                    size_t index = this->GetPageOffset(address);
+                    const size_t end = index + num_pages;
+                    while (index < end) {
+                        const RefCount ref_count = (++this->page_reference_counts[index]);
+                        MESOSPHERE_ABORT_UNLESS(ref_count == 1);
+
+                        index++;
+                    }
+                }
+
                 void Open(KVirtualAddress address, size_t num_pages) {
                     size_t index = this->GetPageOffset(address);
                     const size_t end = index + num_pages;
                     while (index < end) {
                         const RefCount ref_count = (++this->page_reference_counts[index]);
-                        MESOSPHERE_ABORT_UNLESS(ref_count > 0);
+                        MESOSPHERE_ABORT_UNLESS(ref_count > 1);
 
                         index++;
                     }
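
Note the tightened assertion in Open: OpenFirst is now the only path that creates a page's first reference (it asserts the count becomes exactly 1), so a plain Open must always find a page that is already owned, and the post-increment count must exceed 1 rather than 0. Per page, the intended lifecycle looks like this (impl is a hypothetical Impl instance; the final free at count zero is behavior assumed from the manager's Close path):

    impl.OpenFirst(addr, 1);   /* 0 -> 1: first reference, taken by the allocator  */
    impl.Open(addr, 1);        /* 1 -> 2: extra reference; count > 1 holds         */
    impl.Close(addr, 1);       /* 2 -> 1                                           */
    impl.Close(addr, 1);       /* 1 -> 0: page assumed freed back to the heap      */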
 
@@ -178,9 +189,9 @@ namespace ams::kern {
             NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
             NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
 
-            NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
-            NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option);
-            NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
+            NOINLINE KVirtualAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
+            NOINLINE Result AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
 
             void Open(KVirtualAddress address, size_t num_pages) {
                 /* Repeatedly open references until we've done so for all pages. */

[file 2 of 8]

@@ -573,7 +573,7 @@ namespace ams::kern::arch::arm64 {
 
        /* Ensure that any pages we track close on exit. */
        KPageGroup pages_to_close(this->GetBlockInfoManager());
-       KScopedPageGroup spg(pages_to_close);
+       ON_SCOPE_EXIT { pages_to_close.Close(); };
 
        /* Begin traversal. */
        TraversalContext context;
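
KScopedPageGroup is assumed to be the usual RAII pairing, opening on construction and closing on destruction, roughly:

    /* Sketch of the assumed helper; member names are illustrative. */
    class KScopedPageGroup {
        public:
            explicit KScopedPageGroup(KPageGroup &pg) : m_pg(pg) { m_pg.Open(); }
            ~KScopedPageGroup() { m_pg.Close(); }
        private:
            KPageGroup &m_pg;
    };

Since the pages now arrive from AllocateAndOpen with their first reference already held, the constructor's Open would add an unwanted extra reference; only the closing half is still needed, hence the bare ON_SCOPE_EXIT { pages_to_close.Close(); }.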

[file 3 of 8]

@@ -483,7 +483,7 @@ namespace ams::kern::board::nintendo::nx {
            MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize));
 
            constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-           g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
+           g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
            MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null<KVirtualAddress>);
        }
 
@@ -691,12 +691,9 @@ namespace ams::kern::board::nintendo::nx {
        /* Allocate the memory. */
        const size_t num_pages = size / PageSize;
-       const KVirtualAddress vaddr = Kernel::GetMemoryManager().AllocateContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
+       const KVirtualAddress vaddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
        R_UNLESS(vaddr != Null<KVirtualAddress>, svc::ResultOutOfMemory());
 
-       /* Open a reference to the memory. */
-       Kernel::GetMemoryManager().Open(vaddr, num_pages);
-
        /* Ensure we don't leak references to the memory on error. */
        auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(vaddr, num_pages); };

[file 4 of 8]

@@ -158,12 +158,9 @@ namespace ams::kern::init {
        /* Allocate memory for the slab. */
        constexpr auto AllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-       const KVirtualAddress slab_address = Kernel::GetMemoryManager().AllocateContinuous(num_pages, 1, AllocateOption);
+       const KVirtualAddress slab_address = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
        MESOSPHERE_ABORT_UNLESS(slab_address != Null<KVirtualAddress>);
 
-       /* Open references to the slab. */
-       Kernel::GetMemoryManager().Open(slab_address, num_pages);
-
        /* Initialize the slabheap. */
        KPageBuffer::InitializeSlabHeap(GetVoidPointer(slab_address), slab_size);
    }

[file 5 of 8]

@@ -91,11 +91,11 @@ namespace ams::kern {
            /* Allocate memory for the process. */
            auto &mm = Kernel::GetMemoryManager();
            const auto pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;
-           MESOSPHERE_R_ABORT_UNLESS(mm.Allocate(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront)));
+           MESOSPHERE_R_ABORT_UNLESS(mm.AllocateAndOpen(std::addressof(pg), params.code_num_pages, KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront)));
 
            {
                /* Ensure that we do not leak pages. */
-               KScopedPageGroup spg(pg);
+               ON_SCOPE_EXIT { pg.Close(); };
 
                /* Map the process's memory into the temporary region. */
                const auto &temp_region = KMemoryLayout::GetTempRegion();
 
@@ -170,9 +170,8 @@ namespace ams::kern {
        /* Allocate memory for the image. */
        const KMemoryManager::Pool pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetCreateProcessMemoryPool());
        const auto allocate_option = KMemoryManager::EncodeOption(pool, KMemoryManager::Direction_FromFront);
-       KVirtualAddress allocated_memory = mm.AllocateContinuous(num_pages, 1, allocate_option);
+       KVirtualAddress allocated_memory = mm.AllocateAndOpenContinuous(num_pages, 1, allocate_option);
        MESOSPHERE_ABORT_UNLESS(allocated_memory != Null<KVirtualAddress>);
-       mm.Open(allocated_memory, num_pages);
 
        /* Relocate the image. */
        std::memmove(GetVoidPointer(allocated_memory), GetVoidPointer(GetInitialProcessBinaryAddress()), g_initial_process_binary_header.size);

[file 6 of 8]

@@ -117,7 +117,7 @@ namespace ams::kern {
    }
 
-   KVirtualAddress KMemoryManager::AllocateContinuous(size_t num_pages, size_t align_pages, u32 option) {
+   KVirtualAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
        /* Early return if we're allocating no pages. */
        if (num_pages == 0) {
            return Null<KVirtualAddress>;
 
@@ -156,6 +156,9 @@ namespace ams::kern {
            chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages);
        }
 
+       /* Open the first reference to the pages. */
+       chosen_manager->OpenFirst(allocated_block, num_pages);
+
        return allocated_block;
    }
 
@@ -210,7 +213,7 @@ namespace ams::kern {
        return ResultSuccess();
    }
 
-   Result KMemoryManager::Allocate(KPageGroup *out, size_t num_pages, u32 option) {
+   Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
        MESOSPHERE_ASSERT(out != nullptr);
        MESOSPHERE_ASSERT(out->GetNumPages() == 0);
 
@@ -222,10 +225,30 @@ namespace ams::kern {
        KScopedLightLock lk(this->pool_locks[pool]);
 
        /* Allocate the page group. */
-       return this->AllocatePageGroupImpl(out, num_pages, pool, dir, this->has_optimized_process[pool], true);
+       R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, this->has_optimized_process[pool], true));
+
+       /* Open the first reference to the pages. */
+       for (const auto &block : *out) {
+           KVirtualAddress cur_address = block.GetAddress();
+           size_t remaining_pages = block.GetNumPages();
+           while (remaining_pages > 0) {
+               /* Get the manager for the current address. */
+               auto &manager = this->GetManager(cur_address);
+
+               /* Process part or all of the block. */
+               const size_t cur_pages = std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+               manager.OpenFirst(cur_address, cur_pages);
+
+               /* Advance. */
+               cur_address += cur_pages * PageSize;
+               remaining_pages -= cur_pages;
+           }
+       }
+
+       return ResultSuccess();
    }
 
-   Result KMemoryManager::AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) {
+   Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) {
        MESOSPHERE_ASSERT(out != nullptr);
        MESOSPHERE_ASSERT(out->GetNumPages() == 0);
 
@@ -247,6 +270,24 @@ namespace ams::kern {
            /* Set whether we should optimize. */
            optimized = has_optimized && is_optimized;
+
+           /* Open the first reference to the pages. */
+           for (const auto &block : *out) {
+               KVirtualAddress cur_address = block.GetAddress();
+               size_t remaining_pages = block.GetNumPages();
+               while (remaining_pages > 0) {
+                   /* Get the manager for the current address. */
+                   auto &manager = this->GetManager(cur_address);
+
+                   /* Process part or all of the block. */
+                   const size_t cur_pages = std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                   manager.OpenFirst(cur_address, cur_pages);
+
+                   /* Advance. */
+                   cur_address += cur_pages * PageSize;
+                   remaining_pages -= cur_pages;
+               }
+           }
        }
 
        /* Perform optimized memory tracking, if we should. */
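
The reference-opening loop added to AllocateAndOpen and AllocateAndOpenForProcess splits each block by manager: a KPageGroup block can cross the boundary between two managers' regions, and GetPageOffsetToEnd caps how many pages the current manager may open before the loop advances to the next one. (AllocateAndOpenContinuous above needs no such loop, since its single contiguous block lives entirely within chosen_manager.) A toy model of the chunking, under the assumption that each manager covers a fixed span of pages:

    #include <algorithm>
    #include <cstddef>

    /* first_to_end plays the role of GetPageOffsetToEnd at the block start. */
    void OpenBlock(std::size_t block_pages, std::size_t first_to_end, std::size_t manager_span) {
        std::size_t remaining = block_pages;
        std::size_t to_end    = first_to_end;
        while (remaining > 0) {
            const std::size_t cur = std::min(remaining, to_end); /* chunk within one manager */
            /* manager.OpenFirst(cur_address, cur); */
            remaining -= cur;
            to_end = manager_span; /* later chunks start exactly at a manager boundary */
        }
    }

For example, an 8-page block starting 3 pages before a boundary yields OpenFirst calls of 3 and then 5 pages on consecutive managers.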

[file 7 of 8]

@@ -1023,10 +1023,10 @@ namespace ams::kern {
        KPageGroup pg(this->block_info_manager);
 
        /* Allocate the pages. */
-       R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, this->allocate_option));
+       R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, this->allocate_option));
 
-       /* Ensure that the page group is open while we work with it. */
-       KScopedPageGroup spg(pg);
+       /* Ensure that the page group is closed when we're done working with it. */
+       ON_SCOPE_EXIT { pg.Close(); };
 
        /* Clear all pages. */
        for (const auto &it : pg) {
 
@@ -1488,11 +1488,10 @@ namespace ams::kern {
        /* Allocate pages for the heap extension. */
        KPageGroup pg(this->block_info_manager);
-       R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), allocation_size / PageSize, this->allocate_option));
+       R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, this->allocate_option));
 
-       /* Open the pages in the group for the duration of the call, and close them at the end. */
+       /* Close the opened pages when we're done with them. */
        /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
-       pg.Open();
        ON_SCOPE_EXIT { pg.Close(); };
 
        /* Clear all the newly allocated pages. */
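
The retained comment spells out the ownership scheme; per page, the counts work out as follows (the free-at-zero step is assumed from the allocator's Close semantics):

    /* AllocateAndOpen              -> refcount 1                               */
    /* mapping succeeds, opens ref  -> refcount 2                               */
    /* ON_SCOPE_EXIT Close          -> refcount 1, page stays alive, mapped     */
    /* mapping fails, Close         -> refcount 0, page freed automatically     */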
 
@@ -3115,20 +3114,22 @@ namespace ams::kern {
        KScopedResourceReservation memory_reservation(GetCurrentProcess().GetResourceLimit(), ams::svc::LimitableResource_PhysicalMemoryMax, unmapped_size);
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
 
-       /* Ensure that we we clean up on failure. */
+       /* Ensure that we manage page references correctly. */
        KVirtualAddress start_partial_page = Null<KVirtualAddress>;
        KVirtualAddress end_partial_page = Null<KVirtualAddress>;
        KProcessAddress cur_mapped_addr = dst_addr;
 
-       auto cleanup_guard = SCOPE_GUARD {
+       /* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. */
+       ON_SCOPE_EXIT {
            if (start_partial_page != Null<KVirtualAddress>) {
-               Kernel::GetMemoryManager().Open(start_partial_page, 1);
                Kernel::GetMemoryManager().Close(start_partial_page, 1);
            }
            if (end_partial_page != Null<KVirtualAddress>) {
-               Kernel::GetMemoryManager().Open(end_partial_page, 1);
                Kernel::GetMemoryManager().Close(end_partial_page, 1);
            }
+       };
+
+       auto cleanup_guard = SCOPE_GUARD {
            if (cur_mapped_addr != dst_addr) {
                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
                MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_addr, (cur_mapped_addr - dst_addr) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
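
The restructuring leans on the difference between the two scope macros: ON_SCOPE_EXIT always runs, which is correct now that the partial pages must unconditionally drop the reference opened at allocation, while the unmap cleanup stays in a SCOPE_GUARD, assumed here to be cancelable so it fires only on the error path:

    auto guard = SCOPE_GUARD { /* error-path cleanup only */ };
    /* ... operations that may fail ... */
    guard.Cancel(); /* assumed API: on success, suppress the cleanup */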
 
@@ -3137,13 +3138,13 @@ namespace ams::kern {
        /* Allocate the start page as needed. */
        if (aligned_src_start < mapping_src_start) {
-           start_partial_page = Kernel::GetMemoryManager().AllocateContinuous(1, 0, this->allocate_option);
+           start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, this->allocate_option);
            R_UNLESS(start_partial_page != Null<KVirtualAddress>, svc::ResultOutOfMemory());
        }
 
        /* Allocate the end page as needed. */
        if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
-           end_partial_page = Kernel::GetMemoryManager().AllocateContinuous(1, 0, this->allocate_option);
+           end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, this->allocate_option);
            R_UNLESS(end_partial_page != Null<KVirtualAddress>, svc::ResultOutOfMemory());
        }
 
@@ -3676,10 +3677,9 @@ namespace ams::kern {
        /* Allocate pages for the new memory. */
        KPageGroup pg(this->block_info_manager);
-       R_TRY(Kernel::GetMemoryManager().AllocateForProcess(std::addressof(pg), (size - mapped_size) / PageSize, this->allocate_option, GetCurrentProcess().GetId(), this->heap_fill_value));
+       R_TRY(Kernel::GetMemoryManager().AllocateAndOpenForProcess(std::addressof(pg), (size - mapped_size) / PageSize, this->allocate_option, GetCurrentProcess().GetId(), this->heap_fill_value));
 
-       /* Open a reference to the pages we allocated, and close our reference when we're done. */
-       pg.Open();
+       /* Close our reference when we're done. */
        ON_SCOPE_EXIT { pg.Close(); };
 
        /* Map the memory. */
 
@@ -4100,10 +4100,9 @@ namespace ams::kern {
        /* Allocate the new memory. */
        const size_t num_pages = size / PageSize;
-       R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
+       R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
 
-       /* Open the page group, and close it when we're done with it. */
-       pg.Open();
+       /* Close the page group when we're done with it. */
        ON_SCOPE_EXIT { pg.Close(); };
 
        /* Clear the new memory. */

[file 8 of 8]

@@ -37,7 +37,7 @@ namespace ams::kern {
        R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
 
        /* Allocate the memory. */
-       R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(this->page_group), num_pages, owner->GetAllocateOption()));
+       R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(this->page_group), num_pages, owner->GetAllocateOption()));
 
        /* Commit our reservation. */
        memory_reservation.Commit();
 
@@ -46,9 +46,6 @@ namespace ams::kern {
        this->resource_limit = reslimit;
        this->resource_limit->Open();
 
-       /* Open the memory. */
-       this->page_group.Open();
-
        /* Mark initialized. */
        this->is_initialized = true;