vm: controller: implement asynchronous DETACH page requests

This commit is contained in:
2026-03-24 20:21:30 +00:00
parent 7dc0c742fa
commit 4be642f2e5
3 changed files with 140 additions and 101 deletions

View File

@@ -13,13 +13,11 @@ enum page_request_status {
PAGE_REQUEST_PENDING = 0, PAGE_REQUEST_PENDING = 0,
PAGE_REQUEST_IN_PROGRESS, PAGE_REQUEST_IN_PROGRESS,
PAGE_REQUEST_COMPLETE, PAGE_REQUEST_COMPLETE,
PAGE_REQUEST_ASYNC,
}; };
struct vm_controller { struct vm_controller {
struct object vc_base; struct object vc_base;
/* tree of struct vm_objects bound to this controller, keyed with the
* equeue_key_t specified when the object(s) were created. */
struct btree vc_objects;
/* tree of pending page requests */ /* tree of pending page requests */
struct btree vc_requests; struct btree vc_requests;
/* the equeue to send async page requests to */ /* the equeue to send async page requests to */
@@ -36,7 +34,7 @@ struct page_request {
enum page_request_status req_status; enum page_request_status req_status;
kern_status_t req_result; kern_status_t req_result;
spin_lock_t req_lock; spin_lock_t req_lock;
struct vm_object *req_object; equeue_key_t req_object;
struct thread *req_sender; struct thread *req_sender;
struct btree_node req_node; struct btree_node req_node;
off_t req_offset; off_t req_offset;

View File

@@ -23,8 +23,14 @@ static struct object_type vm_controller_type = {
.ob_header_offset = offsetof(struct vm_controller, vc_base), .ob_header_offset = offsetof(struct vm_controller, vc_base),
}; };
/* Object cache backing heap-allocated page requests. Asynchronous (DETACH)
 * requests outlive their sender, so they cannot live on the sender's stack
 * the way synchronous requests do — they are allocated from this cache and
 * freed by the receiver once the request has been handed off. */
static struct vm_cache page_request_cache = {
.c_name = "page-request",
.c_obj_size = sizeof(struct page_request),
};
kern_status_t vm_controller_type_init(void) kern_status_t vm_controller_type_init(void)
{ {
vm_cache_init(&page_request_cache);
return object_type_register(&vm_controller_type); return object_type_register(&vm_controller_type);
} }
@@ -52,10 +58,17 @@ static struct page_request *get_next_request(struct vm_controller *ctrl)
struct page_request *req struct page_request *req
= BTREE_CONTAINER(struct page_request, req_node, cur); = BTREE_CONTAINER(struct page_request, req_node, cur);
spin_lock(&req->req_lock); spin_lock(&req->req_lock);
if (req->req_status == PAGE_REQUEST_PENDING) { switch (req->req_status) {
case PAGE_REQUEST_PENDING:
req->req_status = PAGE_REQUEST_IN_PROGRESS; req->req_status = PAGE_REQUEST_IN_PROGRESS;
ctrl->vc_requests_waiting--; ctrl->vc_requests_waiting--;
return req; return req;
case PAGE_REQUEST_ASYNC:
btree_delete(&ctrl->vc_requests, &req->req_node);
ctrl->vc_requests_waiting--;
return req;
default:
break;
} }
spin_unlock(&req->req_lock); spin_unlock(&req->req_lock);
@@ -65,98 +78,6 @@ static struct page_request *get_next_request(struct vm_controller *ctrl)
return NULL; return NULL;
} }
/*
 * Pop the next ready page request from @ctrl and copy its parameters into
 * the equeue packet @out for the pager to service.
 *
 * Returns KERN_NO_ENTRY when no request is ready, KERN_OK otherwise.
 * get_next_request() returns the request with req_lock held and its state
 * already moved to IN_PROGRESS; the lock is held here until the packet
 * fields are copied out, then released.
 */
kern_status_t vm_controller_recv(
struct vm_controller *ctrl,
equeue_packet_page_request_t *out)
{
struct page_request *req = NULL;
req = get_next_request(ctrl);
if (!req) {
return KERN_NO_ENTRY;
}
/* last waiting request drained: drop the controller's ready signal */
if (ctrl->vc_requests_waiting == 0) {
object_clear_signal(
&ctrl->vc_base,
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
}
/* the packet identifies the object by its controller-assigned key */
out->req_vmo = req->req_object->vo_key;
out->req_type = req->req_type;
out->req_offset = req->req_offset;
out->req_length = req->req_length;
spin_unlock(&req->req_lock);
return KERN_OK;
}
/*
 * Register @eq as the equeue that receives asynchronous page-request
 * notifications for @ctrl, replacing (and unreferencing) any previously
 * registered queue. A reference is taken on @eq; @key is stored so it can
 * be echoed back to the receiver for routing. Always returns KERN_OK.
 */
kern_status_t vm_controller_recv_async(
struct vm_controller *ctrl,
struct equeue *eq,
equeue_key_t key)
{
if (ctrl->vc_eq) {
object_unref(&ctrl->vc_eq->eq_base);
}
object_ref(&eq->eq_base);
ctrl->vc_eq = eq;
ctrl->vc_eq_key = key;
return KERN_OK;
}
/*
 * Create a new VM object bound to @ctrl under the caller-chosen @key.
 *
 * Fails with KERN_NAME_EXISTS if @key is already bound in the controller's
 * object tree, and KERN_NO_MEMORY if the object cannot be allocated.
 * On success the object is inserted into vc_objects, *out is set, and a
 * reference is held on both the controller and the object for the binding.
 */
kern_status_t vm_controller_create_object(
struct vm_controller *ctrl,
const char *name,
size_t name_len,
equeue_key_t key,
size_t data_len,
vm_prot_t prot,
struct vm_object **out)
{
/* reject duplicate keys before allocating anything */
struct vm_object *vmo = get_object(&ctrl->vc_objects, key);
if (vmo) {
return KERN_NAME_EXISTS;
}
vmo = vm_object_create(name, name_len, data_len, prot);
if (!vmo) {
return KERN_NO_MEMORY;
}
/* controller and object pin each other while the binding exists */
object_ref(&ctrl->vc_base);
object_ref(&vmo->vo_base);
vmo->vo_flags |= VMO_CONTROLLER;
vmo->vo_ctrl = ctrl;
vmo->vo_key = key;
put_object(&ctrl->vc_objects, vmo);
*out = vmo;
return KERN_OK;
}
/*
 * Unbind @vmo from @ctrl: clear the object's controller linkage, remove it
 * from the controller's object tree, and drop the references the binding
 * held on both sides. Returns KERN_INVALID_ARGUMENT if @vmo is not
 * actually bound to @ctrl.
 */
kern_status_t vm_controller_detach_object(
struct vm_controller *ctrl,
struct vm_object *vmo)
{
if (vmo->vo_ctrl != ctrl) {
return KERN_INVALID_ARGUMENT;
}
vmo->vo_ctrl = NULL;
vmo->vo_key = 0;
btree_delete(&ctrl->vc_objects, &vmo->vo_ctrl_node);
/* release the mutual references taken at create time */
object_unref(&ctrl->vc_base);
object_unref(&vmo->vo_base);
return KERN_OK;
}
static kern_status_t try_enqueue(struct btree *tree, struct page_request *req) static kern_status_t try_enqueue(struct btree *tree, struct page_request *req)
{ {
if (!tree->b_root) { if (!tree->b_root) {
@@ -196,6 +117,118 @@ static kern_status_t try_enqueue(struct btree *tree, struct page_request *req)
return true; return true;
} }
/*
 * Insert an asynchronous page request into @ctrl's request tree and raise
 * the receive-ready signal so a waiting pager wakes up.
 *
 * The request id starts from a random value and is bumped until it does
 * not collide with an id already in the tree. Always returns KERN_OK.
 */
static kern_status_t send_request_async(
struct vm_controller *ctrl,
struct page_request *req)
{
fill_random(&req->req_id, sizeof req->req_id);
for (;;) {
if (try_enqueue(&ctrl->vc_requests, req)) {
break;
}
/* id collision: retry with the next id */
req->req_id++;
}
ctrl->vc_requests_waiting++;
object_assert_signal(
&ctrl->vc_base,
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
return KERN_OK;
}
/*
 * Pop the next ready page request from @ctrl and copy its parameters into
 * the equeue packet @out for the pager to service.
 *
 * Returns KERN_NO_ENTRY when no request is ready, KERN_OK otherwise.
 * get_next_request() returns the request locked; synchronous requests are
 * left IN_PROGRESS in the tree, while ASYNC requests (e.g. DETACH) have
 * already been unlinked and become owned by us — we free them here.
 */
kern_status_t vm_controller_recv(
struct vm_controller *ctrl,
equeue_packet_page_request_t *out)
{
struct page_request *req = NULL;
req = get_next_request(ctrl);
if (!req) {
return KERN_NO_ENTRY;
}
/* last waiting request drained: drop the controller's ready signal */
if (ctrl->vc_requests_waiting == 0) {
object_clear_signal(
&ctrl->vc_base,
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
}
out->req_vmo = req->req_object;
out->req_type = req->req_type;
out->req_offset = req->req_offset;
out->req_length = req->req_length;
/* Decide ownership BEFORE dropping the lock: a synchronous request is
 * stack-allocated by its sender (see request_page), and once req_lock
 * is released the sender may be woken by fulfill_requests() and return,
 * invalidating *req — reading req_status after spin_unlock would be a
 * use-after-free. */
bool async_owned = (req->req_status == PAGE_REQUEST_ASYNC);
spin_unlock(&req->req_lock);
if (async_owned) {
/* ASYNC requests were removed from the tree by get_next_request
 * and have no sender waiting; release them back to the cache. */
vm_cache_free(&page_request_cache, req);
}
return KERN_OK;
}
/*
 * Register @eq as the equeue that receives asynchronous page-request
 * notifications for @ctrl, replacing any previously registered queue.
 * A reference is taken on @eq; @key is stored so it can be echoed back
 * to the receiver for routing. Always returns KERN_OK.
 */
kern_status_t vm_controller_recv_async(
struct vm_controller *ctrl,
struct equeue *eq,
equeue_key_t key)
{
/* Reference the new queue before unreferencing the old one: if the
 * caller re-registers the same equeue and the controller holds its
 * last reference, unref-first would destroy @eq before we ref it. */
object_ref(&eq->eq_base);
if (ctrl->vc_eq) {
object_unref(&ctrl->vc_eq->eq_base);
}
ctrl->vc_eq = eq;
ctrl->vc_eq_key = key;
return KERN_OK;
}
/*
 * Create a new VM object bound to @ctrl under the caller-chosen @key.
 *
 * Returns KERN_NO_MEMORY if the object cannot be allocated. On success
 * *out receives the new object, which carries the VMO_CONTROLLER flag,
 * a pointer to its controller, and the controller-assigned key; a
 * reference is held on the controller for the lifetime of the binding.
 */
kern_status_t vm_controller_create_object(
struct vm_controller *ctrl,
const char *name,
size_t name_len,
equeue_key_t key,
size_t data_len,
vm_prot_t prot,
struct vm_object **out)
{
struct vm_object *vmo = vm_object_create(name, name_len, data_len, prot);
if (vmo == NULL) {
return KERN_NO_MEMORY;
}
/* the object pins its controller while attached */
object_ref(&ctrl->vc_base);
vmo->vo_ctrl = ctrl;
vmo->vo_key = key;
vmo->vo_flags |= VMO_CONTROLLER;
*out = vmo;
return KERN_OK;
}
kern_status_t vm_controller_detach_object(
struct vm_controller *ctrl,
struct vm_object *vmo)
{
if (vmo->vo_ctrl != ctrl) {
return KERN_INVALID_ARGUMENT;
}
struct page_request *req
= vm_cache_alloc(&page_request_cache, VM_NORMAL);
req->req_type = PAGE_REQUEST_DETACH;
req->req_status = PAGE_REQUEST_ASYNC;
req->req_object = vmo->vo_key;
req->req_sender = current_thread();
send_request_async(ctrl, req);
vmo->vo_ctrl = NULL;
vmo->vo_key = 0;
object_unref(&ctrl->vc_base);
return KERN_OK;
}
static void wait_for_reply( static void wait_for_reply(
struct vm_controller *ctrl, struct vm_controller *ctrl,
struct page_request *req, struct page_request *req,
@@ -221,7 +254,7 @@ static void wait_for_reply(
static void fulfill_requests( static void fulfill_requests(
struct vm_controller *ctrl, struct vm_controller *ctrl,
struct vm_object *obj, equeue_key_t object,
off_t offset, off_t offset,
size_t length, size_t length,
kern_status_t result) kern_status_t result)
@@ -242,7 +275,7 @@ static void fulfill_requests(
match = true; match = true;
} }
if (req->req_object != obj) { if (req->req_object != object) {
match = false; match = false;
} }
@@ -280,7 +313,7 @@ kern_status_t vm_controller_supply_pages(
src_offset, src_offset,
count, count,
NULL); NULL);
fulfill_requests(ctrl, dst, dst_offset, count, status); fulfill_requests(ctrl, dst->vo_key, dst_offset, count, status);
return status; return status;
} }

View File

@@ -29,6 +29,13 @@ static kern_status_t vm_object_cleanup(struct object *obj)
cur = next; cur = next;
} }
if (vmo->vo_ctrl) {
unsigned long flags;
vm_controller_lock_irqsave(vmo->vo_ctrl, &flags);
vm_controller_detach_object(vmo->vo_ctrl, vmo);
vm_controller_unlock_irqrestore(vmo->vo_ctrl, flags);
}
return KERN_OK; return KERN_OK;
} }
@@ -366,12 +373,13 @@ static kern_status_t request_page(
struct vm_controller *ctrl = vo->vo_ctrl; struct vm_controller *ctrl = vo->vo_ctrl;
struct page_request req = {0}; struct page_request req = {0};
req.req_status = PAGE_REQUEST_PENDING; req.req_status = PAGE_REQUEST_PENDING;
req.req_type = PAGE_REQUEST_READ;
req.req_offset = offset; req.req_offset = offset;
req.req_length = vm_page_order_to_bytes(VM_PAGE_4K); req.req_length = vm_page_order_to_bytes(VM_PAGE_4K);
req.req_sender = current_thread(); req.req_sender = current_thread();
object_ref(&vo->vo_base); object_ref(&vo->vo_base);
req.req_object = vo; req.req_object = vo->vo_key;
vm_object_unlock_irqrestore(vo, *irq_flags); vm_object_unlock_irqrestore(vo, *irq_flags);
vm_controller_lock_irqsave(ctrl, irq_flags); vm_controller_lock_irqsave(ctrl, irq_flags);