
Kernel: Retire SchedulerData and add Thread lookup table

This allows us to get rid of the thread lists in SchedulerData.
Also, instead of iterating over all threads to find a thread by id,
just use a lookup table. In the rare case of having to iterate over
all threads, just iterate the lookup table.
Tom 2021-01-27 22:58:24 -07:00 committed by Andreas Kling
parent e55d227f93
commit d5472426ec
Notes: sideshowbarker 2024-07-18 22:47:19 +09:00
5 changed files with 57 additions and 153 deletions
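
The pattern introduced by this commit is straightforward: a global HashMap from ThreadID to Thread*, guarded by a spin lock; each Thread registers itself in its constructor, removes itself in its destructor, so Thread::from_tid() becomes a hash lookup and Thread::for_each() simply walks the table. Below is a minimal user-space sketch of that pattern, not the kernel's API: std::unordered_map and std::mutex stand in for AK::HashMap and SpinLock, and the class and member names are illustrative.

// User-space sketch of the tid -> Thread* lookup table (assumed names, not kernel code).
#include <cassert>
#include <cstdint>
#include <mutex>
#include <unordered_map>

using ThreadID = int32_t;

class Thread {
public:
    explicit Thread(ThreadID tid)
        : m_tid(tid)
    {
        // Register in the lookup table on construction, mirroring the real constructor.
        std::lock_guard lock(s_tid_map_lock);
        auto result = s_tid_map.emplace(m_tid, this);
        assert(result.second); // each tid may be registered only once
    }

    ~Thread()
    {
        // Unregister on destruction so the table never holds dangling pointers.
        std::lock_guard lock(s_tid_map_lock);
        s_tid_map.erase(m_tid);
    }

    ThreadID tid() const { return m_tid; }

    // O(1) lookup by tid, replacing the old "walk every thread list" search.
    static Thread* from_tid(ThreadID tid)
    {
        std::lock_guard lock(s_tid_map_lock);
        auto it = s_tid_map.find(tid);
        return it != s_tid_map.end() ? it->second : nullptr;
    }

    // The rare "visit all threads" case simply iterates the same table.
    template<typename Callback>
    static void for_each(Callback callback)
    {
        std::lock_guard lock(s_tid_map_lock);
        for (auto& it : s_tid_map)
            callback(*it.second);
    }

private:
    ThreadID m_tid;

    static inline std::mutex s_tid_map_lock;
    static inline std::unordered_map<ThreadID, Thread*> s_tid_map;
};

int main()
{
    Thread a(1);
    Thread b(2);
    assert(Thread::from_tid(2) == &b);
    assert(Thread::from_tid(7) == nullptr);
    int count = 0;
    Thread::for_each([&](Thread&) { ++count; });
    assert(count == 2);
}

In the kernel itself the table is Thread::g_tid_map (a HashMap<ThreadID, Thread*>) guarded by g_tid_map_lock, and Thread::from_tid() returns a RefPtr<Thread> so the caller keeps the thread alive after the lock is dropped.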

@@ -53,15 +53,8 @@ public:
bool m_in_scheduler { true };
};
SchedulerData* g_scheduler_data;
RecursiveSpinLock g_scheduler_lock;
void Scheduler::init_thread(Thread& thread)
{
ASSERT(g_scheduler_data);
g_scheduler_data->m_nonrunnable_threads.append(thread);
}
static u32 time_slice_for(const Thread& thread)
{
// One time slice unit == 4ms (assuming 250 ticks/second)
@@ -238,36 +231,29 @@ bool Scheduler::pick_next()
}
if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
dbgln("Scheduler[{}j]: Non-runnables:", Processor::id());
Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
if (thread.state() == Thread::Dying) {
dbgln("Scheduler thread list:", Processor::id());
Thread::for_each([&](Thread& thread) -> IterationDecision {
switch (thread.state()) {
case Thread::Dying:
dbgln(" {:12} {} @ {:04x}:{:08x} Finalizable: {}",
thread.state_string(),
thread,
thread.tss().cs,
thread.tss().eip,
thread.is_finalizable());
} else {
dbgln(" {:12} {} @ {:04x}:{:08x}",
break;
default:
dbgln(" {:12} Pr:{:2} {} @ {:04x}:{:08x}",
thread.state_string(),
thread.priority(),
thread,
thread.tss().cs,
thread.tss().eip);
break;
}
return IterationDecision::Continue;
});
dbgln("Scheduler[{}j]: Runnables:", Processor::id());
Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
dbgln(" {:2} {:12} @ {:04x}:{:08x}",
thread.priority(),
thread.state_string(),
thread.tss().cs,
thread.tss().eip);
return IterationDecision::Continue;
});
}
auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
@@ -507,7 +493,6 @@ void Scheduler::initialize()
ASSERT(&Processor::current() != nullptr); // sanity check
RefPtr<Thread> idle_thread;
g_scheduler_data = new SchedulerData;
g_finalizer_wait_queue = new WaitQueue;
g_ready_queues = new ThreadReadyQueue[g_ready_queue_buckets];

@@ -40,12 +40,10 @@ class Process;
class Thread;
class WaitQueue;
struct RegisterState;
struct SchedulerData;
extern Thread* g_finalizer;
extern WaitQueue* g_finalizer_wait_queue;
extern Atomic<bool> g_finalizer_has_work;
extern SchedulerData* g_scheduler_data;
extern RecursiveSpinLock g_scheduler_lock;
class Scheduler {
@@ -73,14 +71,6 @@ public:
static Thread& pull_next_runnable_thread();
static bool dequeue_runnable_thread(Thread&, bool = false);
static void queue_runnable_thread(Thread&);
template<typename Callback>
static inline IterationDecision for_each_runnable(Callback);
template<typename Callback>
static inline IterationDecision for_each_nonrunnable(Callback);
static void init_thread(Thread& thread);
};
}

@@ -45,6 +45,14 @@
namespace Kernel {
SpinLock<u8> Thread::g_tid_map_lock;
HashMap<ThreadID, Thread*>* Thread::g_tid_map;
void Thread::initialize()
{
g_tid_map = new HashMap<ThreadID, Thread*>();
}
Thread::Thread(NonnullRefPtr<Process> process)
: m_process(move(process))
, m_name(m_process->name())
@@ -59,6 +67,11 @@ Thread::Thread(NonnullRefPtr<Process> process)
} else {
m_tid = Process::allocate_pid().value();
}
{
ScopedSpinLock lock(g_tid_map_lock);
auto result = g_tid_map->set(m_tid, this);
ASSERT(result == AK::HashSetResult::InsertedNewEntry);
}
if constexpr (THREAD_DEBUG)
dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
set_default_signal_dispositions();
@@ -115,9 +128,6 @@ Thread::Thread(NonnullRefPtr<Process> process)
// thread is ready to be cleaned up.
ref();
guard.disarm();
if (m_process->pid() != 0)
Scheduler::init_thread(*this);
}
Thread::~Thread()
@@ -131,11 +141,14 @@ Thread::~Thread()
// the middle of being destroyed.
ScopedSpinLock lock(g_scheduler_lock);
ASSERT(!m_process_thread_list_node.is_in_list());
g_scheduler_data->thread_list_for_state(m_state).remove(*this);
// We shouldn't be queued
ASSERT(m_runnable_priority < 0);
ASSERT(!m_runnable_list_node.is_in_list());
}
{
ScopedSpinLock lock(g_tid_map_lock);
auto result = g_tid_map->remove(m_tid);
ASSERT(result);
}
}
@@ -903,11 +916,6 @@ void Thread::set_state(State new_state, u8 stop_signal)
dbgln<THREAD_DEBUG>("Set thread {} state to {}", *this, state_string());
}
if (m_process->pid() != 0) {
update_state_for_thread(previous_state);
ASSERT(g_scheduler_data->has_thread(*this));
}
if (previous_state == Runnable) {
Scheduler::dequeue_runnable_thread(*this);
} else if (previous_state == Stopped) {
@@ -952,24 +960,6 @@ void Thread::set_state(State new_state, u8 stop_signal)
}
}
void Thread::update_state_for_thread(Thread::State previous_state)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_data);
ASSERT(g_scheduler_lock.own_lock());
auto& previous_list = g_scheduler_data->thread_list_for_state(previous_state);
auto& list = g_scheduler_data->thread_list_for_state(state());
if (&previous_list != &list) {
previous_list.remove(*this);
}
if (list.contains(*this))
return;
list.append(*this);
}
String Thread::backtrace()
{
return backtrace_impl();
@@ -1093,14 +1083,12 @@ const LogStream& operator<<(const LogStream& stream, const Thread& value)
RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
RefPtr<Thread> found_thread;
ScopedSpinLock lock(g_scheduler_lock);
Thread::for_each([&](auto& thread) {
if (thread.tid() == tid) {
found_thread = &thread;
return IterationDecision::Break;
}
return IterationDecision::Continue;
});
{
ScopedSpinLock lock(g_tid_map_lock);
auto it = g_tid_map->find(tid);
if (it != g_tid_map->end())
found_thread = it->value;
}
return found_thread;
}

@@ -27,6 +27,7 @@
#pragma once
#include <AK/Function.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
#include <AK/OwnPtr.h>
@@ -87,12 +88,17 @@ class Thread
friend class Scheduler;
friend class ThreadReadyQueue;
static SpinLock<u8> g_tid_map_lock;
static HashMap<ThreadID, Thread*>* g_tid_map;
public:
inline static Thread* current()
{
return Processor::current_thread();
}
static void initialize();
explicit Thread(NonnullRefPtr<Process>);
~Thread();
@@ -1090,8 +1096,6 @@ public:
template<typename Callback>
static IterationDecision for_each_in_state(State, Callback);
template<typename Callback>
static IterationDecision for_each_living(Callback);
template<typename Callback>
static IterationDecision for_each(Callback);
[[nodiscard]] static bool is_runnable_state(Thread::State state)
@@ -1166,10 +1170,8 @@ public:
private:
IntrusiveListNode m_process_thread_list_node;
IntrusiveListNode m_runnable_list_node;
int m_runnable_priority { -1 };
friend struct SchedulerData;
friend class WaitQueue;
class JoinBlockCondition : public BlockCondition {
@@ -1304,100 +1306,38 @@ private:
void yield_without_holding_big_lock();
void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
void yield_while_not_holding_big_lock();
void update_state_for_thread(Thread::State previous_state);
void drop_thread_count(bool);
};
template<typename Callback>
inline IterationDecision Thread::for_each_living(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
return Thread::for_each([callback](Thread& thread) -> IterationDecision {
if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
return callback(thread);
return IterationDecision::Continue;
});
}
template<typename Callback>
inline IterationDecision Thread::for_each(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_scheduler_lock);
auto ret = Scheduler::for_each_runnable(callback);
if (ret == IterationDecision::Break)
return ret;
return Scheduler::for_each_nonrunnable(callback);
ScopedSpinLock lock(g_tid_map_lock);
for (auto& it : *g_tid_map) {
IterationDecision decision = callback(*it.value);
if (decision != IterationDecision::Continue)
return decision;
}
return IterationDecision::Continue;
}
template<typename Callback>
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_scheduler_lock);
auto new_callback = [=](Thread& thread) -> IterationDecision {
if (thread.state() == state)
return callback(thread);
return IterationDecision::Continue;
};
if (is_runnable_state(state))
return Scheduler::for_each_runnable(new_callback);
return Scheduler::for_each_nonrunnable(new_callback);
ScopedSpinLock lock(g_tid_map_lock);
for (auto& it : *g_tid_map) {
auto& thread = *it.value;
if (thread.state() != state)
continue;
IterationDecision decision = callback(thread);
if (decision != IterationDecision::Continue)
return decision;
}
return IterationDecision::Continue;
}
const LogStream& operator<<(const LogStream&, const Thread&);
struct SchedulerData {
typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;
ThreadList m_runnable_threads;
ThreadList m_nonrunnable_threads;
bool has_thread(Thread& thread) const
{
return m_runnable_threads.contains(thread) || m_nonrunnable_threads.contains(thread);
}
ThreadList& thread_list_for_state(Thread::State state)
{
if (Thread::is_runnable_state(state))
return m_runnable_threads;
return m_nonrunnable_threads;
}
};
template<typename Callback>
inline IterationDecision Scheduler::for_each_runnable(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_lock.own_lock());
auto& tl = g_scheduler_data->m_runnable_threads;
for (auto it = tl.begin(); it != tl.end();) {
auto& thread = *it;
it = ++it;
if (callback(thread) == IterationDecision::Break)
return IterationDecision::Break;
}
return IterationDecision::Continue;
}
template<typename Callback>
inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_lock.own_lock());
auto& tl = g_scheduler_data->m_nonrunnable_threads;
for (auto it = tl.begin(); it != tl.end();) {
auto& thread = *it;
it = ++it;
if (callback(thread) == IterationDecision::Break)
return IterationDecision::Break;
}
return IterationDecision::Continue;
}
}
template<>

@@ -162,6 +162,7 @@ extern "C" [[noreturn]] void init()
}
VirtualConsole::switch_to(0);
Thread::initialize();
Process::initialize();
Scheduler::initialize();