diff --git a/Kernel/Arch/i386/CPU.cpp b/Kernel/Arch/i386/CPU.cpp index 8af17c60273..67848f1422f 100644 --- a/Kernel/Arch/i386/CPU.cpp +++ b/Kernel/Arch/i386/CPU.cpp @@ -1463,6 +1463,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit) TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top); trap.regs = &iretframe; trap.prev_irq_level = 0; + trap.next_trap = nullptr; stack_top -= sizeof(u32); // pointer to TrapFrame *reinterpret_cast<u32*>(stack_top) = stack_top + 4; @@ -1612,6 +1613,15 @@ void Processor::enter_trap(TrapFrame& trap, bool raise_irq) trap.prev_irq_level = m_in_irq; if (raise_irq) m_in_irq++; + if (m_current_thread) { + auto& current_trap = m_current_thread->current_trap(); + trap.next_trap = current_trap; + current_trap = &trap; + // The cs register of this trap tells us where we will return back to + m_current_thread->set_previous_mode(((trap.regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode); + } else { + trap.next_trap = nullptr; + } } void Processor::exit_trap(TrapFrame& trap) @@ -1624,6 +1634,21 @@ void Processor::exit_trap(TrapFrame& trap) if (!m_in_irq && !m_in_critical) check_invoke_scheduler(); + + if (m_current_thread) { + auto& current_trap = m_current_thread->current_trap(); + current_trap = trap.next_trap; + if (current_trap) { + // If we have another higher level trap then we probably returned + // from an interrupt or irq handler. The cs register of the + // new/higher level trap tells us what the mode prior to it was + m_current_thread->set_previous_mode(((current_trap->regs->cs & 3) != 0) ? Thread::PreviousMode::UserMode : Thread::PreviousMode::KernelMode); + } else { + // If we don't have a higher level trap then we're back in user mode. + // Unless we're a kernel process, in which case we're always in kernel mode + m_current_thread->set_previous_mode(m_current_thread->process().is_kernel_process() ? 
Thread::PreviousMode::KernelMode : Thread::PreviousMode::UserMode); + } + } } void Processor::check_invoke_scheduler() diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h index 4830aa77645..0595912189d 100644 --- a/Kernel/Arch/i386/CPU.h +++ b/Kernel/Arch/i386/CPU.h @@ -1078,6 +1078,7 @@ private: struct TrapFrame { u32 prev_irq_level; + TrapFrame* next_trap; RegisterState* regs; // must be last TrapFrame() = delete; @@ -1087,7 +1088,7 @@ struct TrapFrame { TrapFrame& operator=(TrapFrame&&) = delete; }; -#define TRAP_FRAME_SIZE (2 * 4) +#define TRAP_FRAME_SIZE (3 * 4) static_assert(TRAP_FRAME_SIZE == sizeof(TrapFrame)); extern "C" void enter_trap_no_irq(TrapFrame*); diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp index 4c666691831..f949f604620 100644 --- a/Kernel/Scheduler.cpp +++ b/Kernel/Scheduler.cpp @@ -369,10 +369,12 @@ bool Scheduler::context_switch(Thread* thread) ASSERT(thread == Thread::current()); #if ARCH(I386) - auto iopl = get_iopl_from_eflags(Thread::current()->get_register_dump_from_stack().eflags); - if (thread->process().is_user_process() && iopl != 0) { - dbgln("PANIC: Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl); - Processor::halt(); + if (thread->process().is_user_process()) { + auto iopl = get_iopl_from_eflags(Thread::current()->get_register_dump_from_stack().eflags); + if (iopl != 0) { + dbgln("PANIC: Switched to thread {} with non-zero IOPL={}", Thread::current()->tid().value(), iopl); + Processor::halt(); + } } #endif @@ -392,7 +394,7 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first) // Check if we have any signals we should deliver (even if we don't // end up switching to another thread). 
auto current_thread = Thread::current(); - if (!current_thread->is_in_block()) { + if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode) { ScopedSpinLock lock(current_thread->get_lock()); if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) { current_thread->dispatch_one_pending_signal(); @@ -485,6 +487,10 @@ void Scheduler::timer_tick(const RegisterState& regs) if (!current_thread) return; + // Sanity checks + ASSERT(current_thread->current_trap()); + ASSERT(current_thread->current_trap()->regs == &regs); + bool is_bsp = Processor::current().id() == 0; if (!is_bsp) return; // TODO: This prevents scheduling on other CPUs! diff --git a/Kernel/Syscall.cpp b/Kernel/Syscall.cpp index 20416753c21..279cc98f10c 100644 --- a/Kernel/Syscall.cpp +++ b/Kernel/Syscall.cpp @@ -137,6 +137,7 @@ void syscall_handler(TrapFrame* trap) { auto& regs = *trap->regs; auto current_thread = Thread::current(); + ASSERT(current_thread->previous_mode() == Thread::PreviousMode::UserMode); auto& process = current_thread->process(); if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) { @@ -206,6 +207,9 @@ void syscall_handler(TrapFrame* trap) current_thread->check_dispatch_pending_signal(); + // If the previous mode somehow changed something is seriously messed up... + ASSERT(current_thread->previous_mode() == Thread::PreviousMode::UserMode); + // Check if we're supposed to return to userspace or just die. current_thread->die_if_needed(); diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index 3aa69b1d0f6..17f8748b36d 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -699,6 +699,8 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal) return DispatchSignalResult::Deferred; } + ASSERT(previous_mode() == PreviousMode::UserMode); + auto& action = m_signal_action_data[signal]; // FIXME: Implement SA_SIGINFO signal handlers. 
ASSERT(!(action.flags & SA_SIGINFO)); @@ -762,6 +764,9 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal) return DispatchSignalResult::Continue; } + ASSERT(previous_mode() == PreviousMode::UserMode); + ASSERT(current_trap()); + ProcessPagingScope paging_scope(m_process); u32 old_signal_mask = m_signal_mask; @@ -843,7 +848,19 @@ bool Thread::push_value_on_stack(FlatPtr value) RegisterState& Thread::get_register_dump_from_stack() { - return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState)); + auto* trap = current_trap(); + + // We should *always* have a trap. If we don't we're probably a kernel + // thread that hasn't been pre-empted. If we want to support this, we + // need to capture the registers probably into m_tss and return it + ASSERT(trap); + + while (trap) { + if (!trap->next_trap) + break; + trap = trap->next_trap; + } + return *trap->regs; } RefPtr<Thread> Thread::clone(Process& process) diff --git a/Kernel/Thread.h b/Kernel/Thread.h index 99530bc1b59..94cda9b7c29 100644 --- a/Kernel/Thread.h +++ b/Kernel/Thread.h @@ -1106,6 +1106,14 @@ public: u32 ticks_in_user() const { return m_ticks_in_user; } u32 ticks_in_kernel() const { return m_ticks_in_kernel; } + enum class PreviousMode : u8 { + KernelMode = 0, + UserMode + }; + PreviousMode previous_mode() const { return m_previous_mode; } + void set_previous_mode(PreviousMode mode) { m_previous_mode = mode; } + TrapFrame*& current_trap() { return m_current_trap; } + RecursiveSpinLock& get_lock() const { return m_lock; } #if LOCK_DEBUG @@ -1230,6 +1238,7 @@ private: NonnullRefPtr<Process> m_process; ThreadID m_tid { -1 }; TSS32 m_tss; + TrapFrame* m_current_trap { nullptr }; Atomic<u32> m_cpu { 0 }; u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT }; u32 m_ticks_left { 0 }; @@ -1261,6 +1270,7 @@ private: Atomic<bool> m_is_active { false }; bool m_is_joinable { true }; bool m_handling_page_fault { false }; + PreviousMode m_previous_mode { PreviousMode::UserMode }; unsigned m_syscall_count { 0 }; unsigned m_inode_faults { 0 };