From 1b9acc4a6abfd616a7d42bf17a17ada7d4d2159d Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Fri, 24 Jul 2020 15:44:16 -0700
Subject: [PATCH] kern: SvcUnmapPhysicalMemory, cleanup thread pinning

---
 .../mesosphere/kern_k_current_context.hpp     |   9 +
 .../include/mesosphere/kern_k_process.hpp     |   4 +-
 .../include/mesosphere/kern_k_thread.hpp      |   5 +-
 .../arch/arm64/kern_exception_handlers.cpp    |  12 +-
 .../arch/arm64/svc/kern_svc_handlers_asm.s    |  12 +-
 .../source/kern_k_page_table_base.cpp         | 212 +++++++++++++++++-
 .../libmesosphere/source/kern_k_process.cpp   |   2 +-
 .../libmesosphere/source/kern_k_scheduler.cpp |   7 +-
 .../libmesosphere/source/kern_k_thread.cpp    |   2 +-
 .../source/svc/kern_svc_exception.cpp         |   7 +-
 10 files changed, 248 insertions(+), 24 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp
index 442f04fed..9702fd465 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp
@@ -30,6 +30,7 @@ namespace ams::kern {
             KInterruptTaskManager *interrupt_task_manager;
             s32 core_id;
             void *exception_stack_top;
+            ams::svc::ThreadLocalRegion *tlr;
         };
         static_assert(std::is_standard_layout<KCurrentContext>::value && std::is_trivially_destructible<KCurrentContext>::value);
         static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize);
@@ -80,6 +81,10 @@ namespace ams::kern {
         return impl::GetCurrentContext().core_id;
     }
 
+    ALWAYS_INLINE ams::svc::ThreadLocalRegion *GetCurrentThreadLocalRegion() {
+        return impl::GetCurrentContext().tlr;
+    }
+
     ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) {
         impl::GetCurrentContext().current_thread = new_thread;
     }
@@ -88,4 +93,8 @@ namespace ams::kern {
         impl::GetCurrentContext().current_process = new_process;
     }
 
+    ALWAYS_INLINE void SetCurrentThreadLocalRegion(void *address) {
+        impl::GetCurrentContext().tlr = static_cast<ams::svc::ThreadLocalRegion *>(address);
+    }
+
 }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
index 01c1e9af5..2dba56e5c 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
@@ -197,7 +197,7 @@ namespace ams::kern {
             bool LeaveUserException();
             bool ReleaseUserException(KThread *thread);
 
-            KThread *GetPreemptionStatePinnedThread(s32 core_id) const {
+            KThread *GetPinnedThread(s32 core_id) const {
                 MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                 return this->pinned_threads[core_id];
             }
@@ -269,7 +269,7 @@ namespace ams::kern {
 
             Result SetActivity(ams::svc::ProcessActivity activity);
 
-            void SetPreemptionState();
+            void PinCurrentThread();
 
             Result SignalToAddress(KProcessAddress address) {
                 return this->cond_var.SignalToAddress(address);
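
The kern_k_thread.hpp accessors that follow cast the thread's kernel (heap) mapping of its thread local region to ams::svc::ThreadLocalRegion and read the disable count and interrupt flag directly. For orientation, below is a sketch of the layout those accessors and the SVC handler assembly both assume; field names follow ams::svc::ThreadLocalRegion, but the trailing contents of the 0x200-byte region are elided:

    /* Sketch (not part of this patch) of the userland TLR layout assumed here.  */
    /* The IPC message buffer fills the first 0x100 bytes, which is why the      */
    /* handlers read the disable count at offset 0x100, and why the old code     */
    /* wrote the "kernel preemption state" at 0x100 + sizeof(u16) == 0x102.      */
    struct ThreadLocalRegion {
        u32 message_buffer[0x100 / sizeof(u32)];
        volatile u16 disable_count;  /* non-zero while userland asks not to be interrupted */
        volatile u16 interrupt_flag; /* set by the kernel when it pins the thread anyway   */
        /* ... remainder of the 0x200-byte region ... */
    };
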
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
index 87f7f9b0d..e344e98c5 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
@@ -421,8 +421,9 @@ namespace ams::kern {
             constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(this->sync_object_buffer.sync_objects[0]); }
             constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(this->sync_object_buffer.handles[sizeof(this->sync_object_buffer.sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); }
 
-            constexpr u16 GetUserPreemptionState() const { return *GetPointer<u16>(this->tls_address + 0x100); }
-            constexpr void SetKernelPreemptionState(u16 state) const { *GetPointer<u16>(this->tls_address + 0x100 + sizeof(u16)) = state; }
+            u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->disable_count; }
+            void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 1; }
+            void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 0; }
 
             constexpr void SetDebugAttached() { this->debug_attached = true; }
             constexpr bool IsAttachedToDebugger() const { return this->debug_attached; }
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp
index 89cd0b9f7..b82d8faa4 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp
@@ -145,15 +145,15 @@ namespace ams::kern::arch::arm64 {
         {
            const bool is_user_mode = (context->psr & 0xF) == 0;
            if (is_user_mode) {
-                /* Handle any changes needed to the user preemption state. */
-                if (GetCurrentThread().GetUserPreemptionState() != 0 && GetCurrentProcess().GetPreemptionStatePinnedThread(GetCurrentCoreId()) == nullptr) {
+                /* If the user disable count is set, we may need to pin the current thread. */
+                if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {
                    KScopedSchedulerLock lk;
 
-                    /* Note the preemption state in process. */
-                    GetCurrentProcess().SetPreemptionState();
+                    /* Pin the current thread. */
+                    GetCurrentProcess().PinCurrentThread();
 
-                    /* Set the kernel preemption state flag. */
-                    GetCurrentThread().SetKernelPreemptionState(1);
+                    /* Set the interrupt flag for the thread. */
+                    GetCurrentThread().SetInterruptFlag();
                }
 
                /* Enable interrupts while we process the usermode exception. */
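
In the handler changes that follow, tpidr_el1 holds the per-core KCurrentContext pointer, so `ldr x10, [x18, #0x30]` fetches the tlr member added above: with the struct laid out as declared (four pointers, core_id padded to eight bytes, then exception_stack_top), tlr falls at offset 0x30. A minimal compile-time check of that layout assumption, not present in the patch, would be:

    /* Assumes natural LP64 layout of KCurrentContext as declared above:         */
    /* 0x00 current_thread, 0x08 current_process, 0x10 scheduler,                */
    /* 0x18 interrupt_task_manager, 0x20 core_id (+ padding),                    */
    /* 0x28 exception_stack_top, 0x30 tlr.                                       */
    static_assert(offsetof(impl::KCurrentContext, tlr) == 0x30);
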
diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s
index b3106e0b2..39d8eda7e 100644
--- a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s
+++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s
@@ -31,6 +31,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     mrs     x9, elr_el1
     mrs     x10, spsr_el1
     mrs     x11, tpidr_el0
+    mrs     x18, tpidr_el1
 
     /* Save callee-saved registers. */
     stp     x19, x20, [sp, #(8 * 19)]
@@ -63,8 +64,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     tst     x10, #1
     b.eq    3f
 
-    /* Check if our preemption state allows us to call SVCs. */
-    mrs     x10, tpidrro_el0
+    /* Check if our disable count allows us to call SVCs. */
+    ldr     x10, [x18, #0x30]
     ldrh    w10, [x10, #0x100]
     cbz     w10, 1f
 
@@ -83,7 +84,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     strb    w8, [sp, #(0x120 + 0x11)]
 
     /* Invoke the SVC handler. */
-    mrs     x18, tpidr_el1
     msr     daifclr, #2
     blr     x11
     msr     daifset, #2
@@ -211,6 +211,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     mrs     x17, elr_el1
     mrs     x20, spsr_el1
     mrs     x19, tpidr_el0
+    mrs     x18, tpidr_el1
 
     stp     x17, x20, [sp, #(8 * 32)]
     str     x19, [sp, #(8 * 34)]
@@ -239,8 +240,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     tst     x17, #1
     b.eq    3f
 
-    /* Check if our preemption state allows us to call SVCs. */
-    mrs     x15, tpidrro_el0
+    /* Check if our disable count allows us to call SVCs. */
+    ldr     x15, [x18, #0x30]
     ldrh    w15, [x15, #0x100]
     cbz     w15, 1f
 
@@ -259,7 +260,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     strb    w16, [sp, #(0x120 + 0x11)]
 
     /* Invoke the SVC handler. */
-    mrs     x18, tpidr_el1
     msr     daifclr, #2
     blr     x19
     msr     daifset, #2
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index 6f185ad60..e77534dab 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -3352,7 +3352,217 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
-        MESOSPHERE_UNIMPLEMENTED();
+        /* Lock the physical memory lock. */
+        KScopedLightLock phys_lk(this->map_physical_memory_lock);
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Calculate the last address for convenience. */
+        const KProcessAddress last_address = address + size - 1;
+
+        /* Define iteration variables. */
+        KProcessAddress cur_address;
+        size_t mapped_size;
+
+        /* Check if the memory is mapped. */
+        {
+            /* Iterate over the memory. */
+            cur_address = address;
+            mapped_size = 0;
+
+            auto it = this->memory_block_manager.FindIterator(cur_address);
+            while (true) {
+                /* Check that the iterator is valid. */
+                MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
+
+                /* Get the memory info. */
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                /* Verify the memory's state. */
+                const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0;
+                const bool is_free   = info.GetState() == KMemoryState_Free;
+                R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory());
+
+                /* Check if we're done. */
+                if (last_address <= info.GetLastAddress()) {
+                    if (is_normal) {
+                        mapped_size += (last_address + 1 - cur_address);
+                    }
+                    break;
+                }
+
+                /* Track the memory if it's mapped. */
+                if (is_normal) {
+                    mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+                }
+
+                /* Advance. */
+                cur_address = info.GetEndAddress();
+                ++it;
+            }
+
+            /* If there's nothing mapped, we've nothing to do. */
+            R_SUCCEED_IF(mapped_size == 0);
+        }
+
+        /* Make a page group for the unmap region. */
+        KPageGroup pg(this->block_info_manager);
+        {
+            auto &impl = this->GetImpl();
+
+            /* Begin traversal. */
+            TraversalContext context;
+            TraversalEntry   cur_entry  = {};
+            bool             cur_valid  = false;
+            TraversalEntry   next_entry;
+            bool             next_valid;
+            size_t           tot_size   = 0;
+
+            next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address);
+            next_entry.block_size = (next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1)));
+
+            /* Iterate, building the group. */
+            while (true) {
+                if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+                    cur_entry.block_size += next_entry.block_size;
+                } else {
+                    if (cur_valid) {
+                        MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                        R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize));
+                    }
+
+                    /* Update tracking variables. */
+                    tot_size += cur_entry.block_size;
+                    cur_entry = next_entry;
+                    cur_valid = next_valid;
+                }
+
+                if (cur_entry.block_size + tot_size >= size) {
+                    break;
+                }
+
+                next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            }
+
+            /* Add the last block. */
+            if (cur_valid) {
+                MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), (size - tot_size) / PageSize));
+            }
+        }
+        MESOSPHERE_ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Open a reference to the pages we're unmapping, and close the reference when we're done. */
+        pg.Open();
+        ON_SCOPE_EXIT { pg.Close(); };
+
+        /* Reset the current tracking address, and make sure we clean up on failure. */
+        cur_address = address;
+        auto remap_guard = SCOPE_GUARD {
+            if (cur_address > address) {
+                const KProcessAddress last_map_address = cur_address - 1;
+                cur_address = address;
+
+                /* Iterate over the memory we unmapped. */
+                auto it = this->memory_block_manager.FindIterator(cur_address);
+                auto pg_it = pg.begin();
+                KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+                size_t pg_pages = pg_it->GetNumPages();
+
+                while (true) {
+                    /* Get the memory info for the pages we unmapped, convert to property. */
+                    const KMemoryInfo info = it->GetMemoryInfo();
+                    const KPageProperties prev_properties = { info.GetPermission(), false, false, false };
+
+                    /* If the memory is normal, we unmapped it and need to re-map it. */
+                    if (info.GetState() == KMemoryState_Normal) {
+                        /* Determine the range to map. */
+                        size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_map_address + 1 - cur_address) / PageSize;
+
+                        /* While we have pages to map, map them. */
+                        while (map_pages > 0) {
+                            /* Check if we're at the end of the physical block. */
+                            if (pg_pages == 0) {
+                                /* Ensure there are more pages to map. */
+                                MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());
+
+                                /* Advance our physical block. */
+                                ++pg_it;
+                                pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+                                pg_pages = pg_it->GetNumPages();
+                            }
+
+                            /* Map whatever we can. */
+                            const size_t cur_pages = std::min(pg_pages, map_pages);
+                            MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, prev_properties, OperationType_Map, true));
+
+                            /* Advance. */
+                            cur_address += cur_pages * PageSize;
+                            map_pages   -= cur_pages;
+
+                            pg_phys_addr += cur_pages * PageSize;
+                            pg_pages     -= cur_pages;
+                        }
+                    }
+
+                    /* Check if we're done. */
+                    if (last_map_address <= info.GetLastAddress()) {
+                        break;
+                    }
+
+                    /* Advance. */
+                    ++it;
+                }
+            }
+        };
+
+        /* Iterate over the memory, unmapping as we go. */
+        auto it = this->memory_block_manager.FindIterator(cur_address);
+        while (true) {
+            /* Check that the iterator is valid. */
+            MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
+
+            /* Get the memory info. */
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            /* If the memory state is normal, we need to unmap it. */
+            if (info.GetState() == KMemoryState_Normal) {
+                /* Determine the range to unmap. */
+                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false };
+                const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
+
+                /* Unmap. */
+                R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
+            }
+
+            /* Check if we're done. */
+            if (last_address <= info.GetLastAddress()) {
+                break;
+            }
+
+            /* Advance. */
+            cur_address = info.GetEndAddress();
+            ++it;
+        }
+
+        /* Release the memory resource. */
+        this->mapped_physical_memory_size -= mapped_size;
+        GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size);
+
+        /* Update memory blocks. */
+        this->memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
+
+        /* We succeeded. */
+        remap_guard.Cancel();
+        return ResultSuccess();
     }
 
     Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
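
With UnmapPhysicalMemory now implemented, the userland map/use/unmap round trip is supported end to end. A hedged usage sketch follows (names follow the ams::svc user-mode wrappers; exact wrapper signatures may differ):

    /* Hypothetical userland example, not part of this patch. The address and    */
    /* size must be page-aligned and lie within the process's alias region.      */
    Result MapUseUnmap(uintptr_t address, size_t size) {
        /* Back the range with physical memory, counted against PhysicalMemoryMax. */
        R_TRY(ams::svc::MapPhysicalMemory(address, size));

        /* ... the range is now ordinary KMemoryState_Normal memory ... */

        /* Release it. Per the kernel code above, already-free holes inside the   */
        /* range are tolerated, and a fully-unmapped range succeeds immediately.  */
        R_TRY(ams::svc::UnmapPhysicalMemory(address, size));
        return ResultSuccess();
    }
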
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index cb47993fa..5275df1a1 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -977,7 +977,7 @@ namespace ams::kern {
         }
     }
 
-    void KProcess::SetPreemptionState() {
+    void KProcess::PinCurrentThread() {
         MESOSPHERE_UNIMPLEMENTED();
     }
 
diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp
index 392a385ca..b1f0b96cd 100644
--- a/libraries/libmesosphere/source/kern_k_scheduler.cpp
+++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp
@@ -118,11 +118,11 @@ namespace ams::kern {
         for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
             KThread *top_thread = priority_queue.GetScheduledFront(core_id);
             if (top_thread != nullptr) {
-                /* If the thread has no waiters, we need to check if the process has a thread pinned by PreemptionState. */
+                /* If the thread has no waiters, we need to check if the process has a thread pinned. */
                 if (top_thread->GetNumKernelWaiters() == 0) {
                     if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
-                        if (KThread *suggested = parent->GetPreemptionStatePinnedThread(core_id); suggested != nullptr && suggested != top_thread) {
-                            /* We prefer our parent's pinned thread possible. However, we also don't want to schedule un-runnable threads. */
+                        if (KThread *suggested = parent->GetPinnedThread(core_id); suggested != nullptr && suggested != top_thread && suggested->GetNumKernelWaiters() == 0) {
+                            /* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */
                             if (suggested->GetRawState() == KThread::ThreadState_Runnable) {
                                 top_thread = suggested;
                             } else {
@@ -261,6 +261,7 @@ namespace ams::kern {
 
         /* Set the new Thread Local region. */
         cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
+        SetCurrentThreadLocalRegion(next_thread->GetThreadLocalRegionHeapAddress());
    }
 
    void KScheduler::ClearPreviousThread(KThread *thread) {
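
The kern_k_thread.cpp change below still leaves unpinning unimplemented (see the TODO), and nothing in this patch calls the new ClearInterruptFlag. Purely as an illustration of the intended pairing, a hypothetical UnpinCurrentThread counterpart to PinCurrentThread might look like the following sketch:

    /* Hypothetical sketch, not part of this patch: undo PinCurrentThread under  */
    /* the scheduler lock by clearing the per-core pinned slot and the flag that */
    /* pinning set in the thread's TLR.                                          */
    void KProcess::UnpinCurrentThread() {
        KScopedSchedulerLock sl;

        KThread *cur_thread = GetCurrentThreadPointer();
        const s32 core_id   = GetCurrentCoreId();
        MESOSPHERE_ASSERT(this->pinned_threads[core_id] == cur_thread);

        /* Clear the pinned slot and the interrupt flag the pin set. */
        this->pinned_threads[core_id] = nullptr;
        cur_thread->ClearInterruptFlag();

        /* Let the scheduler reconsider candidates for this core. */
        KScheduler::SetSchedulerUpdateNeeded();
    }
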
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
index 9ec2802ca..6a050f2a1 100644
--- a/libraries/libmesosphere/source/kern_k_thread.cpp
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -327,7 +327,7 @@ namespace ams::kern {
         /* Release user exception, if relevant. */
         if (this->parent != nullptr) {
             this->parent->ReleaseUserException(this);
-            if (this->parent->GetPreemptionStatePinnedThread(GetCurrentCoreId()) == this) {
+            if (this->parent->GetPinnedThread(GetCurrentCoreId()) == this) {
                 /* TODO: this->parent->UnpinCurrentThread(); */
                 MESOSPHERE_UNIMPLEMENTED();
             }
diff --git a/libraries/libmesosphere/source/svc/kern_svc_exception.cpp b/libraries/libmesosphere/source/svc/kern_svc_exception.cpp
index 77f24df2d..6932b6b50 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_exception.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_exception.cpp
@@ -23,7 +23,7 @@ namespace ams::kern::svc {
 
         void Break(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) {
             /* Log for debug that Break was called. */
-            MESOSPHERE_LOG("%s: Break(%08x)\n", GetCurrentProcess().GetName(), static_cast<u32>(break_reason));
+            MESOSPHERE_LOG("%s: Break(%08x, %016lx, %zu)\n", GetCurrentProcess().GetName(), static_cast<u32>(break_reason), address, size);
 
             /* If the current process is attached to debugger, notify it. */
             if (GetCurrentProcess().IsAttachedToDebugger()) {
@@ -36,7 +36,10 @@ namespace ams::kern::svc {
             }
 
             /* TODO */
-            MESOSPHERE_UNIMPLEMENTED();
+            if (size == sizeof(u32)) {
+                MESOSPHERE_LOG("DEBUG: %08x\n", *reinterpret_cast<u32 *>(address));
+            }
+            MESOSPHERE_PANIC("Break was called\n");
        }
 
    }
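
For reference, the new Break path can be exercised from userland; with a 4-byte argument the kernel logs the pointed-to value before panicking (or hands the exception to an attached debugger instead). A hedged sketch, using the ams::svc user-mode wrapper:

    /* Hypothetical userland call, not part of this patch. */
    u32 break_value = 0xCAFE0001; /* arbitrary value to surface in the kernel log */
    ams::svc::Break(ams::svc::BreakReason_User,
                    reinterpret_cast<uintptr_t>(std::addressof(break_value)),
                    sizeof(break_value));

    /* Expected kernel log, per the MESOSPHERE_LOG calls above (process name,    */
    /* reason value, and pointer vary):                                          */
    /*   <name>: Break(<reason>, <address>, 4)                                   */
    /*   DEBUG: cafe0001                                                         */
    /* ...followed by MESOSPHERE_PANIC("Break was called\n") when no debugger    */
    /* is attached.                                                              */
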