
Commit 8159ac3

committed
kernel+x86_64: implement token extraction at page fault time
1 parent 51d1473 commit 8159ac3

File tree

4 files changed: +102 -5 lines changed

oro-arch-x86_64/src/interrupt/isr_page_fault.rs

+28 -5
@@ -2,11 +2,13 @@
 use core::arch::asm;
 use oro_kernel::scheduler::PageFaultType;
+use oro_mem::mapper::AddressSpace;
 use oro_sync::Lock;
+use crate::mem::address_space::AddressSpaceLayout;

 crate::isr! {
     /// The ISR (Interrupt Service Routine) for page fault exceptions.
-    unsafe fn isr_page_fault(kernel, _user_task, err_code) -> Option<Switch> {
+    unsafe fn isr_page_fault(kernel, user_task, err_code) -> Option<Switch> {
         let cr2: usize;
         // SAFETY: `cr2` is a register that holds the faulting address. It is safe to read.
         unsafe {
@@ -31,9 +33,30 @@ crate::isr! {
             PageFaultType::Read
         };

-        // SAFETY: `event_page_fault` specifies that, in the event we return back to the task,
-        // SAFETY: that the task has been instructed to re-try the memory operation. x86_64
-        // SAFETY: does this by design, so we must do no special handling here.
-        Some(unsafe { kernel.scheduler().lock().event_page_fault(err_type, cr2) })
+        // Try to fetch the page table entry for the faulting address.
+        user_task
+            .as_ref()
+            .and_then(|t| t.with(|task|
+                AddressSpaceLayout::user_data()
+                    .try_get_nonpresent_bits(&task.handle().mapper, cr2)
+            ))
+            .and_then(|nonpresent_bits| {
+                if nonpresent_bits > 0 {
+                    // NOTE(qix-): Page table entries' present bit is bit 0.
+                    // NOTE(qix-): We can thus shift the non-present bits to the right
+                    // NOTE(qix-): and set the highest bit (63) to 1 to get the token ID.
+                    // NOTE(qix-): This follows the guaranteed spec of Tab IDs, which are in turn
+                    // NOTE(qix-): token IDs (as specified by the kernel).
+                    let token_id = (nonpresent_bits >> 1) | (1 << 63);
+                    Some(unsafe { kernel.scheduler().lock().event_page_fault_token(err_type, cr2, token_id) })
+                } else {
+                    None
+                }
+            }).or_else(|| {
+                // SAFETY: `event_page_fault` specifies that, in the event we return back to the task,
+                // SAFETY: that the task has been instructed to re-try the memory operation. x86_64
+                // SAFETY: does this by design, so we must do no special handling here.
+                Some(unsafe { kernel.scheduler().lock().event_page_fault(err_type, cr2) })
+            })
     }
 }
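
For reference, the decode performed above can be read in isolation. The sketch below is not part of the commit; it only restates the NOTE(qix-) comments, assuming (as they state) that token/Tab IDs always have bit 63 set:

    // Standalone restatement of the decode in isr_page_fault above.
    // Assumption (from the NOTE(qix-) comments): token IDs always have bit 63
    // set, so it can be dropped when stored and re-added when decoded.
    fn token_id_from_nonpresent_bits(nonpresent_bits: u64) -> Option<u64> {
        if nonpresent_bits > 0 {
            // Entry bit 0 is the (clear) present bit; token bits 0..=62 live in
            // entry bits 1..=63. Restore the implied high bit to rebuild the ID.
            Some((nonpresent_bits >> 1) | (1 << 63))
        } else {
            // An all-zero entry is an ordinary unmapped page: no token attached.
            None
        }
    }

Storing the ID this way keeps bit 0 free for the hardware present flag without losing information, since the dropped top bit is guaranteed to be 1.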

oro-arch-x86_64/src/mem/paging.rs

+4
@@ -117,6 +117,10 @@ impl PageTableEntry {
     /// Resets the entry to its default state.
     #[inline]
     pub fn reset(&mut self) {
+        // NOTE(qix-): It is IMPERATIVE that this is set to 0 and not some other
+        // NOTE(qix-): value, even if it's not marked "present". We use non-present
+        // NOTE(qix-): page table entries as a way to mark lazy-allocated pages, the
+        // NOTE(qix-): unused bits referring to a token ID.
         self.0 = 0;
     }
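
A short sketch of the invariant this NOTE relies on (my reading of the comment, not code from this commit): a raw value of 0 is the only safe "truly unmapped" state, because any other non-present value would be interpreted as a token.

    // Hypothetical helper, for illustration only: how the fault path ends up
    // classifying a raw page table entry under this scheme.
    fn classify_entry(raw: u64) -> &'static str {
        if raw & 1 != 0 {
            "present mapping"
        } else if raw != 0 {
            "non-present, carries a lazy-allocation token"
        } else {
            "non-present and truly unmapped"
        }
    }

Resetting an entry to anything other than 0 would therefore fabricate a token out of stale bits.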

oro-arch-x86_64/src/mem/segment.rs

+39
@@ -128,6 +128,45 @@ impl AddressSegment {
         Ok(entry)
     }

+    /// Attempts to fetch the token data for the given virtual address.
+    ///
+    /// If the page table entry is not present, returns the non-present bits
+    /// of the entry. If the entry is present, or if any intermediate page table
+    /// entries are missing, returns `None`.
+    ///
+    /// Also returns `None` if the virtual address falls outside of the segment.
+    ///
+    /// **Does not allocate** anything for intermediate page tables.
+    pub(crate) fn try_get_nonpresent_bits(
+        &self,
+        space: &AddressSpaceHandle,
+        virt: usize,
+    ) -> Option<u64> {
+        // SAFETY: We know that the base physical address is a valid page table.
+        let mut current_page_table = unsafe { space.base_phys().as_ref_unchecked::<PageTable>() };
+
+        for level in (1..space.paging_level().as_usize()).rev() {
+            let index = (virt >> (12 + level * 9)) & 0x1FF;
+            let entry = &current_page_table[index];
+
+            if !entry.present() {
+                return None;
+            }
+
+            // SAFETY: Barring a bug, we know that the physical address is valid.
+            current_page_table =
+                unsafe { Phys::from_address_unchecked(entry.address()).as_ref_unchecked() };
+        }
+
+        let entry = (*current_page_table)[(virt >> 12) & 0x1FF];
+
+        if entry.present() {
+            None
+        } else {
+            Some(entry.into())
+        }
+    }
+
     /// Attempts to unmap a virtual address from the segment, returning the
     /// physical address that was previously mapped. Assumes that the CPU
     /// is in a 4-level paging mode.
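
The walk above relies on the standard x86_64 address split. As a worked example (assuming 4-level, 4 KiB paging; not code from this commit), the index math looks like this:

    // Each paging level consumes 9 bits of the virtual address, starting at
    // bit 12; index 0 below is the top-level table, index 3 the leaf table.
    fn pt_indices(virt: usize) -> [usize; 4] {
        let mut idx = [0usize; 4];
        for level in (0..4).rev() {
            idx[3 - level] = (virt >> (12 + level * 9)) & 0x1FF;
        }
        idx
    }

Note that the loop in try_get_nonpresent_bits only covers the intermediate levels (1..paging_level) and bails out if any of them is non-present; the leaf entry is then inspected separately, which is where a token would live.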

oro-kernel/src/scheduler.rs

+31
@@ -273,6 +273,37 @@ impl<A: Arch> Scheduler<A> {
         self.kernel.handle().schedule_timer(1000);
         switch
     }
+
+    /// Indicates to the kernel that a page fault has occurred whereby a reserved
+    /// page table entry token was used.
+    ///
+    /// If the token is valid, the kernel will handle issuing mapping requests
+    /// or other asynchronous operations to (eventually) resolve the fault.
+    ///
+    /// # Safety
+    /// Calling architectures **must** treat "return back to same task"
+    /// [`Switch`]es as to mean "retry the faulting memory operation". The
+    /// kernel will NOT attempt to recover from fatal or unexpected page faults.
+    ///
+    /// **No locks may be held at this boundary.** The architecture **MUST**
+    /// free up any task- or memory-related locks before calling this function.
+    /// The exception, of course, is the scheduler lock itself, which is held
+    /// by the architecture.
+    #[expect(clippy::missing_panics_doc)]
+    #[must_use]
+    pub unsafe fn event_page_fault_token(
+        &mut self,
+        fault_type: PageFaultType,
+        vaddr: usize,
+        token_id: u64,
+    ) -> Switch<A> {
+        if let Some(_token) = crate::tab::get().lookup::<crate::token::Token>(token_id) {
+            todo!("handle page fault token: {token_id:#016X}");
+        } else {
+            // Normal page fault; forward it on.
+            self.event_page_fault(fault_type, vaddr)
+        }
+    }
 }

 impl<A: Arch> Drop for Scheduler<A> {