Kernel: Some futex improvements
This adds support for FUTEX_WAKE_OP, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET, FUTEX_REQUEUE, and FUTEX_CMP_REQUEUE, as well as global and private futexes and absolute/relative timeouts against the appropriate clock. It also changes the implementation so that kernel resources are only used while a thread is actually blocked on a futex. Global futexes are implemented as offsets in VMObjects, so that different processes can share a futex on the same VMObject even when it is mapped at different virtual addresses in each process.
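To illustrate the keying scheme, a minimal standalone C++ sketch of "a global futex is an offset into a shared backing object" is shown below. SharedBuffer, WaitList, and FutexRegistry are hypothetical stand-ins, not the kernel's types; the actual change keys global futexes by VMObject* and offset-in-VMObject (see g_global_futex_queues in the diff).

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>

struct SharedBuffer { };                // stand-in for a VMObject (shared memory backing)
struct WaitList { int waiters { 0 }; }; // stand-in for a FutexQueue

class FutexRegistry {
public:
    // Global futexes are keyed by (backing object, offset) rather than by
    // virtual address, so two processes mapping the same buffer at different
    // addresses still resolve to the same wait list.
    std::shared_ptr<WaitList> global_queue(SharedBuffer& buffer, uintptr_t offset)
    {
        auto& queue = m_global[&buffer][offset];
        if (!queue)
            queue = std::make_shared<WaitList>();
        return queue;
    }

private:
    std::map<SharedBuffer*, std::map<uintptr_t, std::shared_ptr<WaitList>>> m_global;
};

int main()
{
    SharedBuffer shared;
    FutexRegistry registry;

    // Process A maps the buffer at 0x20000000, process B at 0x7f000000; both
    // futex words live at offset 0x40 into the buffer, so they share a queue.
    auto queue_seen_by_a = registry.global_queue(shared, 0x40);
    auto queue_seen_by_b = registry.global_queue(shared, 0x40);
    std::cout << (queue_seen_by_a == queue_seen_by_b) << "\n"; // prints 1
}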
parent: 7581b64705
commit: 1d621ab172

Notes (sideshowbarker, 2024-07-18 23:08:10 +09:00):
Author: https://github.com/tomuta
Commit: 1d621ab172
Pull-request: https://github.com/SerenityOS/serenity/pull/4467
Reviewed-by: https://github.com/awesomekling
Reviewed-by: https://github.com/bgianfo
Reviewed-by: https://github.com/bugaevc
23 changed files with 928 additions and 63 deletions
@@ -24,17 +24,83 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Process.h>
#include <Kernel/VM/MemoryManager.h>

//#define FUTEX_DEBUG

namespace Kernel {

WaitQueue& Process::futex_queue(Userspace<const i32*> userspace_address)
static SpinLock<u8> g_global_futex_lock;
static AK::Singleton<HashMap<VMObject*, FutexQueues>> g_global_futex_queues;

FutexQueue::FutexQueue(FlatPtr user_address_or_offset, VMObject* vmobject)
    : m_user_address_or_offset(user_address_or_offset)
    , m_is_global(vmobject != nullptr)
{
    auto& queue = m_futex_queues.ensure(userspace_address.ptr());
    if (!queue)
        queue = make<WaitQueue>();
    return *queue;
#ifdef FUTEX_DEBUG
    dbg() << "Futex @ " << this << (m_is_global ? " (global)" : "(local)");
#endif
    if (m_is_global) {
        // Only register for global futexes
        m_vmobject = vmobject->make_weak_ptr();
        vmobject->register_on_deleted_handler(*this);
    }
}

FutexQueue::~FutexQueue()
{
    if (m_is_global) {
        if (auto vmobject = m_vmobject.strong_ref())
            vmobject->unregister_on_deleted_handler(*this);
    }
#ifdef FUTEX_DEBUG
    dbg() << "~Futex @ " << this << (m_is_global ? " (global)" : "(local)");
#endif
}

void FutexQueue::vmobject_deleted(VMObject& vmobject)
{
    ASSERT(m_is_global); // If we got called we must be a global futex
    // Because we're taking ourselves out of the global queue, we need
    // to make sure we have at least a reference until we're done
    NonnullRefPtr<FutexQueue> own_ref(*this);

#ifdef FUTEX_DEBUG
    dbg() << "Futex::vmobject_deleted @ " << this << (m_is_global ? " (global)" : "(local)");
#endif

    // Because this is called from the VMObject's destructor, getting a
    // strong_ref in this function is unsafe!
    m_vmobject = nullptr; // Just to be safe...

    {
        ScopedSpinLock lock(g_global_futex_lock);
        g_global_futex_queues->remove(&vmobject);
    }

    bool did_wake_all;
    auto wake_count = wake_all(did_wake_all);
#ifdef FUTEX_DEBUG
    if (wake_count > 0)
        dbg() << "Futex: @ " << this << " unblocked " << wake_count << " waiters due to vmobject free";
#else
    (void)wake_count;
#endif
    ASSERT(did_wake_all); // No one should be left behind...
}

void Process::clear_futex_queues_on_exec()
{
    ScopedSpinLock lock(m_futex_lock);
    for (auto& it : m_futex_queues) {
        bool did_wake_all;
        it.value->wake_all(did_wake_all);
        ASSERT(did_wake_all); // No one should be left behind...
    }
    m_futex_queues.clear();
}

int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
@@ -45,42 +111,270 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
    if (!copy_from_user(&params, user_params))
        return -EFAULT;

    switch (params.futex_op) {
    case FUTEX_WAIT: {
        i32 user_value;
        if (!copy_from_user(&user_value, params.userspace_address))
            return -EFAULT;
        if (user_value != params.val)
            return -EAGAIN;

        Thread::BlockTimeout timeout;
    Thread::BlockTimeout timeout;
    u32 cmd = params.futex_op & FUTEX_CMD_MASK;
    switch (cmd) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE: {
        if (params.timeout) {
            timespec ts_abstimeout { 0, 0 };
            if (!copy_from_user(&ts_abstimeout, params.timeout))
            timespec ts_stimeout { 0, 0 };
            if (!copy_from_user(&ts_stimeout, params.timeout))
                return -EFAULT;
            timeout = Thread::BlockTimeout(true, &ts_abstimeout);
            clockid_t clock_id = (params.futex_op & FUTEX_CLOCK_REALTIME) ? CLOCK_REALTIME_COARSE : CLOCK_MONOTONIC_COARSE;
            bool is_absolute = cmd != FUTEX_WAIT;
            timeout = Thread::BlockTimeout(is_absolute, &ts_stimeout, nullptr, clock_id);
        }
        if (cmd == FUTEX_WAIT_BITSET && params.val3 == FUTEX_BITSET_MATCH_ANY)
            cmd = FUTEX_WAIT;
        break;
    case FUTEX_WAKE_BITSET:
        if (params.val3 == FUTEX_BITSET_MATCH_ANY)
            cmd = FUTEX_WAKE;
        break;
    }
    }

        WaitQueue& wait_queue = futex_queue((FlatPtr)params.userspace_address);
        Thread::BlockResult result = wait_queue.wait_on(timeout, "Futex");
        if (result == Thread::BlockResult::InterruptedByTimeout) {
    bool is_private = (params.futex_op & FUTEX_PRIVATE_FLAG) != 0;
    auto& queue_lock = is_private ? m_futex_lock : g_global_futex_lock;
    auto user_address_or_offset = FlatPtr(params.userspace_address);
    auto user_address_or_offset2 = FlatPtr(params.userspace_address2);

    // If this is a global lock, look up the underlying VMObject *before*
    // acquiring the queue lock
    RefPtr<VMObject> vmobject, vmobject2;
    if (!is_private) {
        if (!Kernel::is_user_range(VirtualAddress(user_address_or_offset), sizeof(u32)))
            return -EFAULT;
        auto region = MM.find_region_from_vaddr(*Process::current(), VirtualAddress(user_address_or_offset));
        if (!region)
            return -EFAULT;
        vmobject = region->vmobject();
        user_address_or_offset = region->offset_in_vmobject_from_vaddr(VirtualAddress(user_address_or_offset));

        switch (cmd) {
        case FUTEX_REQUEUE:
        case FUTEX_CMP_REQUEUE:
        case FUTEX_WAKE_OP: {
            if (!Kernel::is_user_range(VirtualAddress(user_address_or_offset2), sizeof(u32)))
                return -EFAULT;
            auto region2 = MM.find_region_from_vaddr(*Process::current(), VirtualAddress(user_address_or_offset2));
            if (!region2)
                return -EFAULT;
            vmobject2 = region2->vmobject();
            user_address_or_offset2 = region->offset_in_vmobject_from_vaddr(VirtualAddress(user_address_or_offset2));
            break;
        }
        }
    }

    auto find_global_futex_queues = [&](VMObject& vmobject, bool create_if_not_found) -> FutexQueues* {
        auto& global_queues = *g_global_futex_queues;
        auto it = global_queues.find(&vmobject);
        if (it != global_queues.end())
            return &it->value;
        if (create_if_not_found) {
            // TODO: is there a better way than setting and finding it again?
            auto result = global_queues.set(&vmobject, {});
            ASSERT(result == AK::HashSetResult::InsertedNewEntry);
            it = global_queues.find(&vmobject);
            ASSERT(it != global_queues.end());
            return &it->value;
        }
        return nullptr;
    };

    auto find_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found) -> RefPtr<FutexQueue> {
        ASSERT(is_private || vmobject);
        auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, create_if_not_found);
        if (!queues)
            return {};
        auto it = queues->find(user_address_or_offset);
        if (it != queues->end())
            return it->value;
        if (create_if_not_found) {
            auto futex_queue = adopt(*new FutexQueue(user_address_or_offset, vmobject));
            auto result = queues->set(user_address_or_offset, futex_queue);
            ASSERT(result == AK::HashSetResult::InsertedNewEntry);
            return futex_queue;
        }
        return {};
    };

    auto remove_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset) {
        auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, false);
        if (queues) {
            queues->remove(user_address_or_offset);
            if (!is_private && queues->is_empty())
                g_global_futex_queues->remove(vmobject);
        }
    };

    auto do_wake = [&](VMObject* vmobject, FlatPtr user_address_or_offset, u32 count, Optional<u32> bitmask) -> int {
        if (count == 0)
            return 0;
        auto futex_queue = find_futex_queue(vmobject, user_address_or_offset, false);
        if (!futex_queue)
            return 0;
        bool is_empty;
        u32 woke_count = futex_queue->wake_n(count, bitmask, is_empty);
        if (is_empty) {
            // If there are no more waiters, we want to get rid of the futex!
            remove_futex_queue(vmobject, user_address_or_offset);
        }
        return (int)woke_count;
    };

    ScopedSpinLock lock(queue_lock);

    auto do_wait = [&](u32 bitset) -> int {
        auto user_value = user_atomic_load_relaxed(params.userspace_address);
        if (!user_value.has_value())
            return -EFAULT;
        if (user_value.value() != params.val) {
            dbg() << "futex wait: EAGAIN. user value: " << (void*)user_value.value() << " @ " << (void*)params.userspace_address << " != val: " << params.val;
            return -EAGAIN;
        }
        atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);

        auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, true);
        ASSERT(futex_queue);

        // We need to release the lock before blocking. But we have a reference
        // to the FutexQueue so that we can keep it alive.
        lock.unlock();

        Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset);

        lock.lock();
        if (futex_queue->is_empty()) {
            // If there are no more waiters, we want to get rid of the futex!
            remove_futex_queue(vmobject, user_address_or_offset);
        }
        if (block_result == Thread::BlockResult::InterruptedByTimeout) {
            return -ETIMEDOUT;
        }
        return 0;
    };

        break;
    }
    case FUTEX_WAKE:
        if (params.val == 0)
            return 0;
        if (params.val == 1) {
            futex_queue((FlatPtr)params.userspace_address).wake_one();
        } else {
            futex_queue((FlatPtr)params.userspace_address).wake_n(params.val);
    auto do_requeue = [&](Optional<u32> val3) -> int {
        auto user_value = user_atomic_load_relaxed(params.userspace_address);
        if (!user_value.has_value())
            return -EFAULT;
        if (val3.has_value() && val3.value() != user_value.value())
            return -EAGAIN;
        atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);

        int woken_or_requeued = 0;
        if (auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, false)) {
            RefPtr<FutexQueue> target_futex_queue;
            bool is_empty, is_target_empty;
            woken_or_requeued = futex_queue->wake_n_requeue(
                params.val, [&]() -> FutexQueue* {
                    // NOTE: futex_queue's lock is being held while this callback is called
                    // The reason we're doing this in a callback is that we don't want to always
                    // create a target queue, only if we actually have anything to move to it!
                    target_futex_queue = find_futex_queue(vmobject2.ptr(), user_address_or_offset2, true);
                    return target_futex_queue.ptr();
                },
                params.val2, is_empty, is_target_empty);
            if (is_empty)
                remove_futex_queue(vmobject, user_address_or_offset);
            if (is_target_empty && target_futex_queue)
                remove_futex_queue(vmobject2, user_address_or_offset2);
        }
        break;
        return woken_or_requeued;
    };

    switch (cmd) {
    case FUTEX_WAIT:
        return do_wait(0);

    case FUTEX_WAKE:
        return do_wake(vmobject.ptr(), user_address_or_offset, params.val, {});

    case FUTEX_WAKE_OP: {
        Optional<u32> oldval;
        u32 op_arg = _FUTEX_OP_ARG(params.val3);
        auto op = _FUTEX_OP(params.val3);
        if (op & FUTEX_OP_ARG_SHIFT) {
            op_arg = 1 << op_arg;
            op &= FUTEX_OP_ARG_SHIFT;
        }
        atomic_thread_fence(AK::MemoryOrder::memory_order_release);
        switch (op) {
        case FUTEX_OP_SET:
            oldval = user_atomic_exchange_relaxed(params.userspace_address2, op_arg);
            break;
        case FUTEX_OP_ADD:
            oldval = user_atomic_fetch_add_relaxed(params.userspace_address2, op_arg);
            break;
        case FUTEX_OP_OR:
            oldval = user_atomic_fetch_or_relaxed(params.userspace_address2, op_arg);
            break;
        case FUTEX_OP_ANDN:
            oldval = user_atomic_fetch_and_not_relaxed(params.userspace_address2, op_arg);
            break;
        case FUTEX_OP_XOR:
            oldval = user_atomic_fetch_xor_relaxed(params.userspace_address2, op_arg);
            break;
        default:
            return -EINVAL;
        }
        if (!oldval.has_value())
            return -EFAULT;
        atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
        int result = do_wake(vmobject.ptr(), user_address_or_offset, params.val, {});
        if (params.val2 > 0) {
            bool compare_result;
            switch (_FUTEX_CMP(params.val3)) {
            case FUTEX_OP_CMP_EQ:
                compare_result = (oldval.value() == _FUTEX_CMP_ARG(params.val3));
                break;
            case FUTEX_OP_CMP_NE:
                compare_result = (oldval.value() != _FUTEX_CMP_ARG(params.val3));
                break;
            case FUTEX_OP_CMP_LT:
                compare_result = (oldval.value() < _FUTEX_CMP_ARG(params.val3));
                break;
            case FUTEX_OP_CMP_LE:
                compare_result = (oldval.value() <= _FUTEX_CMP_ARG(params.val3));
                break;
            case FUTEX_OP_CMP_GT:
                compare_result = (oldval.value() > _FUTEX_CMP_ARG(params.val3));
                break;
            case FUTEX_OP_CMP_GE:
                compare_result = (oldval.value() >= _FUTEX_CMP_ARG(params.val3));
                break;
            default:
                return -EINVAL;
            }
            if (compare_result)
                result += do_wake(vmobject2.ptr(), user_address_or_offset2, params.val2, {});
        }
        return result;
    }

        return 0;
    case FUTEX_REQUEUE:
        return do_requeue({});

    case FUTEX_CMP_REQUEUE:
        return do_requeue(params.val3);

    case FUTEX_WAIT_BITSET:
        ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAIT
        if (params.val3 == 0)
            return -EINVAL;
        return do_wait(params.val3);

    case FUTEX_WAKE_BITSET:
        ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
        if (params.val3 == 0)
            return -EINVAL;
        return do_wake(vmobject.ptr(), user_address_or_offset, params.val, params.val3);
    }
    return -ENOSYS;
}

}
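For context, here is a hedged sketch of how userspace might drive this syscall to build a simple lock. sys_futex(), FutexMutex, and the numeric constant values below are illustrative placeholders, not Serenity's actual libc interface or futex.h values; only the FUTEX_WAIT / FUTEX_WAKE / FUTEX_PRIVATE_FLAG semantics come from the change above.

#include <atomic>
#include <cstdint>

// Placeholder op values; the real constants come from the system futex header.
enum : int { FUTEX_WAIT = 0, FUTEX_WAKE = 1, FUTEX_PRIVATE_FLAG = 1 << 7 };

// Hypothetical wrapper that would forward (address, op, value, timeout) to the
// sys$futex entry point shown above; stubbed out so the sketch compiles standalone.
static int sys_futex(std::atomic<uint32_t>*, int, uint32_t, const void*) { return 0; }

struct FutexMutex {
    std::atomic<uint32_t> state { 0 }; // 0 = unlocked, 1 = locked

    void lock()
    {
        uint32_t expected = 0;
        while (!state.compare_exchange_strong(expected, 1)) {
            // FUTEX_WAIT only blocks if the word still holds the expected value;
            // the kernel rechecks it (user_atomic_load_relaxed() in do_wait above).
            sys_futex(&state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 1, nullptr);
            expected = 0;
        }
    }

    void unlock()
    {
        state.store(0);
        // FUTEX_PRIVATE_FLAG routes the wakeup through the per-process
        // m_futex_queues map instead of the global VMObject-keyed map.
        sys_futex(&state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, nullptr);
    }
};

int main()
{
    FutexMutex mutex;
    mutex.lock();
    mutex.unlock();
}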