2026-03-15 22:22:58 +00:00
|
|
|
#include <kernel/address-space.h>
|
|
|
|
|
#include <kernel/futex.h>
|
2026-03-18 20:57:51 +00:00
|
|
|
#include <kernel/sched.h>
|
|
|
|
|
#include <kernel/task.h>
|
2026-03-15 22:22:58 +00:00
|
|
|
#include <mango/status.h>
|
|
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
/* Internal-only flag for get_data(): allocate and insert a futex object
 * when none exists yet for the requested key. */
#define FUTEX_CREATE 0x40u

/* Btree of all shared (cross-address-space) futexes, keyed by futex key.
 * Private futexes live on each task (t_futex) instead — see get_data(). */
static struct btree shared_futex_list = {0};

/* Guards shared_futex_list; always taken with spin_lock_irqsave(). */
static spin_lock_t shared_futex_list_lock = SPIN_LOCK_INIT;

/* Object cache backing all struct futex allocations (set up in futex_init). */
static struct vm_cache futex_cache = {
    .c_name = "futex",
    .c_obj_size = sizeof(struct futex),
};

/* Generated btree helpers: put_futex() inserts a futex by its f_key via the
 * embedded f_node; get_futex() looks one up by uintptr_t key. */
BTREE_DEFINE_SIMPLE_INSERT(struct futex, f_node, f_key, put_futex)
BTREE_DEFINE_SIMPLE_GET(struct futex, uintptr_t, f_node, f_key, get_futex)
|
2026-03-15 22:22:58 +00:00
|
|
|
|
|
|
|
|
/*
 * futex_init - one-time initialization of the futex subsystem.
 *
 * Sets up the object cache that backs struct futex allocations.
 * Returns KERN_OK; nothing here can fail.
 */
kern_status_t futex_init(void)
{
    vm_cache_init(&futex_cache);
    return KERN_OK;
}
|
|
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
static kern_status_t get_data(
|
|
|
|
|
futex_key_t key,
|
|
|
|
|
unsigned int flags,
|
|
|
|
|
struct futex **out,
|
|
|
|
|
spin_lock_t **out_lock,
|
|
|
|
|
unsigned long *irq_flags)
|
2026-03-15 22:22:58 +00:00
|
|
|
{
|
2026-03-18 20:57:51 +00:00
|
|
|
spin_lock_t *lock = NULL;
|
|
|
|
|
struct btree *futex_list = NULL;
|
|
|
|
|
if (flags & FUTEX_PRIVATE) {
|
|
|
|
|
struct task *self = current_task();
|
|
|
|
|
lock = &self->t_base.ob_lock;
|
|
|
|
|
futex_list = &self->t_futex;
|
|
|
|
|
} else if (flags & FUTEX_SHARED) {
|
|
|
|
|
lock = &shared_futex_list_lock;
|
|
|
|
|
futex_list = &shared_futex_list;
|
|
|
|
|
} else {
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(lock, irq_flags);
|
|
|
|
|
struct futex *futex = get_futex(futex_list, key);
|
2026-03-15 22:22:58 +00:00
|
|
|
|
|
|
|
|
if (!futex && !(flags & FUTEX_CREATE)) {
|
2026-03-18 20:57:51 +00:00
|
|
|
spin_unlock_irqrestore(lock, *irq_flags);
|
|
|
|
|
return KERN_NO_ENTRY;
|
2026-03-15 22:22:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
futex = vm_cache_alloc(&futex_cache, VM_NORMAL);
|
2026-03-18 20:57:51 +00:00
|
|
|
if (!futex) {
|
|
|
|
|
spin_unlock_irqrestore(lock, *irq_flags);
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-15 22:22:58 +00:00
|
|
|
futex->f_key = key;
|
|
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
put_futex(futex_list, futex);
|
2026-03-15 22:22:58 +00:00
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
*out = futex;
|
|
|
|
|
*out_lock = lock;
|
|
|
|
|
return KERN_OK;
|
2026-03-15 22:22:58 +00:00
|
|
|
}
|
|
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
static kern_status_t cleanup_data(struct futex *futex, unsigned int flags)
|
|
|
|
|
{
|
|
|
|
|
struct btree *futex_list = NULL;
|
|
|
|
|
if (flags & FUTEX_PRIVATE) {
|
|
|
|
|
struct task *self = current_task();
|
|
|
|
|
futex_list = &self->t_futex;
|
|
|
|
|
} else if (flags & FUTEX_SHARED) {
|
|
|
|
|
futex_list = &shared_futex_list;
|
|
|
|
|
} else {
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
btree_delete(futex_list, &futex->f_node);
|
|
|
|
|
vm_cache_free(&futex_cache, futex);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * futex_get_shared - resolve the key for a shared futex word.
 *
 * Translates the user virtual address of the futex word through the
 * caller's address space to obtain an address-space-independent key.
 * Returns the status of the translation.
 */
static kern_status_t futex_get_shared(kern_futex_t *futex, futex_key_t *out)
{
    unsigned long irq_state;
    struct address_space *as = current_task()->t_address_space;

    address_space_lock_irqsave(as, &irq_state);
    kern_status_t ret = address_space_translate(
        as,
        (virt_addr_t)futex,
        out,
        &irq_state);
    address_space_unlock_irqrestore(as, irq_state);

    return ret;
}
|
|
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
/*
 * futex_get_private - resolve the key for a private futex word.
 *
 * Private futexes are only meaningful within a single address space, so
 * the user virtual address itself serves as the key.  Always KERN_OK.
 */
static kern_status_t futex_get_private(kern_futex_t *futex, futex_key_t *out)
{
    *out = (futex_key_t)futex;
    return KERN_OK;
}
|
|
|
|
|
|
|
|
|
|
kern_status_t futex_get(
|
|
|
|
|
kern_futex_t *futex,
|
|
|
|
|
futex_key_t *out,
|
|
|
|
|
unsigned int flags)
|
2026-03-15 22:22:58 +00:00
|
|
|
{
|
2026-03-18 20:57:51 +00:00
|
|
|
if (flags & FUTEX_PRIVATE) {
|
|
|
|
|
return futex_get_private(futex, out);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (flags & FUTEX_SHARED) {
|
|
|
|
|
return futex_get_shared(futex, out);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
2026-03-15 22:22:58 +00:00
|
|
|
}
|
|
|
|
|
|
2026-03-18 20:57:51 +00:00
|
|
|
/*
 * futex_read - load the current value of the futex word.
 * @futex: futex whose key encodes where the word lives.
 * @flags: FUTEX_PRIVATE or FUTEX_SHARED, matching how the key was made.
 * @out:   receives the value read.
 *
 * Returns KERN_MEMORY_FAULT if a shared key's physical address has no
 * kernel mapping, KERN_INVALID_ARGUMENT when neither scope flag is set,
 * otherwise KERN_OK.
 */
static kern_status_t futex_read(
    struct futex *futex,
    unsigned int flags,
    kern_futex_t *out)
{
    if (flags & FUTEX_PRIVATE) {
        /* Private key is the raw user virtual address of the word; read it
         * directly in the current address space.
         * NOTE(review): assumes the page is resident — a fault here would
         * happen with the caller's spinlock held; confirm this is safe. */
        virt_addr_t addr = futex->f_key;
        *out = *(kern_futex_t *)addr;
        return KERN_OK;
    }

    if (flags & FUTEX_SHARED) {
        /* Shared key is a physical address (see futex_get_shared()); read
         * it through the kernel's physical-to-virtual mapping. */
        phys_addr_t paddr = futex->f_key;
        virt_addr_t vaddr = (virt_addr_t)vm_phys_to_virt(paddr);
        if (!vaddr) {
            /* Physical address is not covered by the kernel mapping. */
            return KERN_MEMORY_FAULT;
        }

        *out = *(kern_futex_t *)vaddr;
        return KERN_OK;
    }

    return KERN_INVALID_ARGUMENT;
}
|
|
|
|
|
|
|
|
|
|
kern_status_t futex_wait(
|
|
|
|
|
futex_key_t key,
|
|
|
|
|
kern_futex_t new_val,
|
|
|
|
|
unsigned int flags)
|
|
|
|
|
{
|
|
|
|
|
spin_lock_t *lock = NULL;
|
|
|
|
|
unsigned long irq_flags = 0;
|
|
|
|
|
struct futex *futex = NULL;
|
|
|
|
|
kern_status_t status = get_data(key, flags, &futex, &lock, &irq_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_futex_t current_val = 0;
|
|
|
|
|
status = futex_read(futex, flags, ¤t_val);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
spin_unlock_irqrestore(lock, irq_flags);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (current_val != new_val) {
|
|
|
|
|
spin_unlock_irqrestore(lock, irq_flags);
|
|
|
|
|
return KERN_BAD_STATE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct wait_item waiter;
|
|
|
|
|
thread_wait_begin(&waiter, &futex->f_waiters);
|
|
|
|
|
spin_unlock_irqrestore(lock, irq_flags);
|
|
|
|
|
|
|
|
|
|
schedule(SCHED_NORMAL);
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(lock, &irq_flags);
|
|
|
|
|
thread_wait_end(&waiter, &futex->f_waiters);
|
|
|
|
|
|
|
|
|
|
if (waitqueue_empty(&futex->f_waiters)) {
|
|
|
|
|
cleanup_data(futex, flags);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(lock, irq_flags);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * futex_wake - wake threads blocked on the futex at @key.
 * @key:      resolved futex key (see futex_get()).
 * @nwaiters: number of waiters to wake, or FUTEX_WAKE_ALL for every one.
 * @flags:    FUTEX_PRIVATE or FUTEX_SHARED.
 *
 * Does not create a futex object: when nothing waits on @key, get_data()
 * reports KERN_NO_ENTRY and that status is returned unchanged.
 */
kern_status_t futex_wake(futex_key_t key, size_t nwaiters, unsigned int flags)
{
    struct futex *futex = NULL;
    spin_lock_t *lock = NULL;
    unsigned long irq_flags = 0;

    kern_status_t status = get_data(key, flags, &futex, &lock, &irq_flags);
    if (status != KERN_OK) {
        return status;
    }

    /* Woken threads dequeue themselves in futex_wait(), and the last one
     * out frees the futex — no cleanup is needed here. */
    if (nwaiters != FUTEX_WAKE_ALL) {
        wakeup_n(&futex->f_waiters, nwaiters);
    } else {
        wakeup_queue(&futex->f_waiters);
    }

    spin_unlock_irqrestore(lock, irq_flags);
    return KERN_OK;
}
|