/*
 * mango/sched/thread.c — thread lifecycle: allocation, kernel/user
 * initialization, wake/exit/join/kill, and per-thread configuration.
 */
#include <kernel/bitmap.h>
#include <kernel/cpu.h>
#include <kernel/machine/thread.h>
#include <kernel/object.h>
#include <kernel/printk.h>
#include <kernel/task.h>
#include <kernel/thread.h>
#include <mango/signal.h>
/* Checked downcast from a generic object pointer to its containing
 * struct thread, using the registered thread_type for verification. */
#define THREAD_CAST(p) OBJECT_C_CAST(struct thread, tr_base, &thread_type, p)
/* Object-system type descriptor for threads; tr_base is the embedded
 * object header inside struct thread. */
static struct object_type thread_type = {
        .ob_name = "thread",
        .ob_size = sizeof(struct thread),
        .ob_header_offset = offsetof(struct thread, tr_base),
};
/* Register the thread object type with the object system.
 * Must run before any thread_alloc() call. Returns the registration
 * status unchanged. */
kern_status_t thread_object_type_init(void)
{
        kern_status_t status = object_type_register(&thread_type);
        return status;
}
/* Public wrapper around the THREAD_CAST macro: convert a generic
 * object pointer into its containing struct thread. */
struct thread *thread_cast(struct object *obj)
{
        struct thread *thr = THREAD_CAST(obj);
        return thr;
}
/* Allocate a fresh thread object through the object system.
 * Returns NULL when object_create() fails; the caller is responsible
 * for initializing the returned thread. */
struct thread *thread_alloc(void)
{
        struct object *obj = object_create(&thread_type);
        return obj ? THREAD_CAST(obj) : NULL;
}
/* Initialize a kernel-mode thread that will start executing at ip.
 *
 * Precondition: the caller must have set thr->tr_parent — the first
 * statement dereferences it to allocate the thread id.
 *
 * Assigns id/priority/state/quantum, allocates the kernel stack, points
 * tr_sp at the top of that stack, and builds the initial kernel context
 * so the scheduler can switch to this thread.
 *
 * Returns KERN_NO_MEMORY if the kernel stack cannot be allocated
 * (no other state is rolled back), KERN_OK otherwise.
 */
kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip)
{
        /* Thread ids are allocated from the parent task's counter. */
        thr->tr_id = thr->tr_parent->t_next_thread_id++;
        thr->tr_prio = PRIO_NORMAL;
        thr->tr_state = THREAD_READY;
        thr->tr_quantum_target = default_quantum();
        thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
        if (!thr->tr_kstack) {
                return KERN_NO_MEMORY;
        }
        /* Stacks grow down: start sp at the top of the allocation. */
        thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
                + vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
        thr->tr_bp = thr->tr_sp;
        /* Lay down the initial switch frame so the first context switch
         * lands at ip; this adjusts tr_sp downward. */
        ml_thread_prepare_kernel_context(ip, &thr->tr_sp);
        return KERN_OK;
}
/* Initialize a user-mode thread starting at user ip with user stack sp;
 * up to nr_args argument words are delivered to the new thread.
 *
 * NOTE(review): unlike thread_init_kernel(), this does not assign
 * tr_id, tr_prio or tr_parent — confirm the caller sets them.
 *
 * Returns KERN_NO_MEMORY if the kernel stack cannot be allocated,
 * KERN_OK otherwise.
 */
kern_status_t thread_init_user(
        struct thread *thr,
        virt_addr_t ip,
        virt_addr_t sp,
        const uintptr_t *args,
        size_t nr_args)
{
        thr->tr_state = THREAD_READY;
        thr->tr_quantum_target = default_quantum();
        thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
        if (!thr->tr_kstack) {
                return KERN_NO_MEMORY;
        }
        /* Stacks grow down: start sp at the top of the allocation. */
        thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
                + vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
        thr->tr_bp = thr->tr_sp;
        thr->tr_cpu_kernel_sp = thr->tr_sp;
        /* the new thread needs two contexts:
         * 1) to get the thread running in kernel mode, so that it can
         *    execute ml_thread_switch_user
         * 2) to allow ml_thread_switch_user to jump to the correct place
         *    in usermode (and with the correct stack).
         *
         * these two contexts are constructed on the thread's kernel stack
         * in reverse order.
         */
        /* this context will be used by ml_user_return to jump to userspace
         * with the specified instruction pointer and user stack */
        ml_thread_prepare_user_context(ip, sp, &thr->tr_sp, args, nr_args);
        /* this context will be used by the scheduler and ml_thread_switch to
         * jump to ml_user_return in kernel mode with the thread's kernel stack.
         */
        ml_thread_prepare_kernel_context(
                (uintptr_t)ml_thread_switch_user,
                &thr->tr_sp);
        return KERN_OK;
}
/* Release a thread's resources.
 *
 * NOTE(review): currently a no-op — tr_kstack (allocated in
 * thread_init_kernel/thread_init_user) and the underlying object are
 * never released here. Confirm whether the object system reclaims them
 * elsewhere; otherwise this leaks one kernel stack per thread. */
void thread_free(struct thread *thr)
{
}
/* Return the thread currently running on this CPU, or NULL when the
 * per-CPU data is not available. Pairs get_this_cpu()/put_cpu() around
 * the runqueue read. */
struct thread *current_thread(void)
{
        struct thread *running = NULL;
        struct cpu_data *cpu = get_this_cpu();
        if (cpu) {
                running = cpu->c_rq.rq_cur;
                put_cpu(cpu);
        }
        return running;
}
bool need_resched(void)
{
return (current_thread()->tr_flags & THREAD_F_NEED_RESCHED) != 0;
}
/* Accessor: return the thread's scheduling priority (tr_prio). */
int thread_priority(struct thread *thr)
{
        return thr->tr_prio;
}
/* Mark a thread runnable and enqueue it on a runqueue: the thread's
 * own rq if it has one, otherwise this CPU's rq.
 *
 * NOTE(review): tr_state is written before the rq lock is taken —
 * confirm callers serialize against concurrent state changes. */
void thread_awaken(struct thread *thr)
{
        struct runqueue *rq = thr->tr_rq;
        if (!rq) {
                rq = cpu_rq(this_cpu());
        }
        thr->tr_state = THREAD_READY;
        unsigned long flags;
        rq_lock(rq, &flags);
        rq_enqueue(rq, thr);
        rq_unlock(rq, flags);
}
/* Terminate the calling thread: mark it stopped, signal any joiners
 * via the object layer, then yield forever (the scheduler will never
 * pick a THREAD_STOPPED thread again; this function does not return). */
void thread_exit(void)
{
        struct thread *self = current_thread();
        unsigned long flags;
        /* Hold the thread lock (irqs off) across the state change and
         * signal so joiners observe a consistent STOPPED state. */
        thread_lock_irqsave(self, &flags);
        self->tr_state = THREAD_STOPPED;
        object_assert_signal(&self->tr_base, THREAD_SIGNAL_STOPPED);
        tracek("thread %s[%u.%u] exited",
                self->tr_parent->t_name,
                self->tr_parent->t_id,
                self->tr_id);
        thread_unlock_irqrestore(self, flags);
        /* Never return: keep handing the CPU back to the scheduler. */
        while (1) {
                schedule(SCHED_NORMAL);
        }
}
/* Block until the given thread has stopped. irq_flags carries the
 * caller's saved interrupt state through object_wait_signal.
 *
 * NOTE(review): the tr_state check is done without holding the thread
 * lock — correctness against a lost wakeup depends on
 * object_wait_signal re-checking the signal; confirm its contract. */
void thread_join(struct thread *thread, unsigned long *irq_flags)
{
        while (1) {
                if (thread->tr_state == THREAD_STOPPED) {
                        break;
                }
                object_wait_signal(
                        &thread->tr_base,
                        THREAD_SIGNAL_STOPPED,
                        irq_flags);
        }
}
/* Forcibly stop another thread: mark it stopped, pull it off its
 * runqueue if queued, and signal joiners.
 *
 * NOTE(review): unlike thread_exit(), tr_state is written and tr_rq is
 * read without the thread lock — confirm callers serialize against the
 * target thread running concurrently. */
void thread_kill(struct thread *thread)
{
        thread->tr_state = THREAD_STOPPED;
        if (thread->tr_rq) {
                unsigned long flags;
                rq_lock(thread->tr_rq, &flags);
                rq_remove_thread(thread->tr_rq, thread);
                rq_unlock(thread->tr_rq, flags);
        }
        object_assert_signal(&thread->tr_base, THREAD_SIGNAL_STOPPED);
        tracek("thread %s[%u.%u] killed",
                thread->tr_parent->t_name,
                thread->tr_parent->t_id,
                thread->tr_id);
}
struct thread *create_kernel_thread(void (*fn)(void))
{
struct task *kernel = kernel_task();
struct thread *thr = thread_alloc();
thr->tr_id = kernel->t_next_thread_id++;
thr->tr_parent = kernel;
thr->tr_prio = PRIO_NORMAL;
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
thread_init_kernel(thr, (uintptr_t)fn);
unsigned long flags;
task_lock_irqsave(kernel, &flags);
queue_push_back(&kernel->t_threads, &thr->tr_parent_entry);
task_unlock_irqrestore(kernel, flags);
schedule_thread_on_cpu(thr);
return thr;
}
struct thread *create_idle_thread(void)
{
struct task *idle = idle_task();
struct thread *thr = thread_alloc();
thr->tr_id = idle->t_next_thread_id++;
thr->tr_parent = idle;
thr->tr_prio = PRIO_NORMAL;
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
unsigned long flags;
task_lock_irqsave(idle, &flags);
queue_push_back(&idle->t_threads, &thr->tr_parent_entry);
task_unlock_irqrestore(idle, flags);
return thr;
}
/* Read a per-thread configuration value into out (at most max bytes).
 * The switch is a placeholder for architecture-independent keys; all
 * keys currently fall through to the machine layer. */
kern_status_t thread_config_get(
        struct thread *thread,
        kern_config_key_t key,
        void *out,
        size_t max)
{
        switch (key) {
        default:
                break;
        }
        return ml_thread_config_get(thread, key, out, max);
}
/* Write a per-thread configuration value from ptr (len bytes).
 * The switch is a placeholder for architecture-independent keys; all
 * keys currently fall through to the machine layer. */
kern_status_t thread_config_set(
        struct thread *thread,
        kern_config_key_t key,
        const void *ptr,
        size_t len)
{
        switch (key) {
        default:
                break;
        }
        return ml_thread_config_set(thread, key, ptr, len);
}