Skip to content

Commit

Permalink
sync+kernel: add ReentrantMutex
Browse files Browse the repository at this point in the history
  • Loading branch information
Qix- committed Jan 12, 2025
1 parent d8c7109 commit 6ee585d
Show file tree
Hide file tree
Showing 4 changed files with 177 additions and 11 deletions.
29 changes: 26 additions & 3 deletions oro-kernel/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,12 @@ use self::{arch::Arch, scheduler::Scheduler};
/// **The fields of this structure are NOT accessible
/// between cores.** Taking references to fields of this structure
/// and passing them between cores is **undefined behavior**.
///
/// The generic type `A` **must** be the same type across
/// all cores in the system, else undefined behavior WILL occur.
pub struct Kernel<A: Arch> {
/// The core's ID.
id: usize,
id: u32,
/// Global reference to the shared kernel state.
state: &'static KernelState<A>,
/// The kernel scheduler.
Expand All @@ -71,6 +74,21 @@ pub struct Kernel<A: Arch> {
handle: A::CoreHandle,
}

// NOTE(qix-): This is an ergonomics hack to avoid `A: Arch` in a lot of places.
//
// Holds the monomorphized `get_arch_kernel_id::<A>` function pointer; written
// exactly once in `Kernel::initialize_for_core` and read thereafter by
// `oro_sync_current_core_id` (the C-ABI hook resolved by `oro-sync`).
//
// NOTE(review): this is an unsynchronized `static mut` — soundness relies on
// every write in `initialize_for_core` happening-before any reentrant-mutex
// lock on any core; confirm boot ordering guarantees this.
#[doc(hidden)]
static mut KERNEL_ID_FN: MaybeUninit<fn() -> u32> = MaybeUninit::uninit();

/// C-ABI hook consumed (via `extern "C"`) by `oro-sync`'s reentrant mutex;
/// returns the current core's ID by invoking the function pointer stored in
/// [`KERNEL_ID_FN`].
///
/// # Safety
/// [`KERNEL_ID_FN`] must have been written (in `Kernel::initialize_for_core`)
/// before this is called; otherwise `assume_init` reads an uninitialized
/// function pointer, which is undefined behavior.
#[doc(hidden)]
#[no_mangle]
unsafe extern "C" fn oro_sync_current_core_id() -> u32 {
KERNEL_ID_FN.assume_init()()
}

/// Returns the current core's ID via the core-local [`Kernel`] instance.
///
/// Monomorphized per-architecture; a pointer to this function is what gets
/// stored in [`KERNEL_ID_FN`] so that non-generic code can look up the core ID
/// without an `A: Arch` bound.
#[doc(hidden)]
fn get_arch_kernel_id<A: Arch>() -> u32 {
Kernel::<A>::get().id()
}

impl<A: Arch> Kernel<A> {
/// Initializes a new core-local instance of the Oro kernel.
///
Expand All @@ -92,12 +110,17 @@ impl<A: Arch> Kernel<A> {
/// address space mapper handle for the kernel to use. It must
/// be the final one that will be used for the lifetime of the core.
pub unsafe fn initialize_for_core(
id: usize,
id: u32,
global_state: &'static KernelState<A>,
handle: A::CoreHandle,
) -> Result<&'static Self, MapError> {
assert::fits::<Self, 4096>();

#[expect(static_mut_refs)]
{
KERNEL_ID_FN.write(get_arch_kernel_id::<A>);
}

let mapper = AddressSpace::<A>::current_supervisor_space();
let core_local_segment = AddressSpace::<A>::kernel_core_local();

Expand Down Expand Up @@ -145,7 +168,7 @@ impl<A: Arch> Kernel<A> {

/// Returns the core's ID.
#[must_use]
pub fn id(&self) -> usize {
pub fn id(&self) -> u32 {
self.id
}

Expand Down
16 changes: 8 additions & 8 deletions oro-kernel/src/thread.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@ enum State {
/// The thread is stopped.
Stopped,
/// The thread is paused on the given core, awaiting a new time slice.
Paused(usize),
Paused(u32),
/// The thread is running on the given core.
Running(usize),
Running(u32),
/// The thread invoked a system call, which is blocked and awaiting
/// a response.
PausedSystemCall(SystemCallRequest),
Expand Down Expand Up @@ -205,7 +205,7 @@ impl<A: Arch> Thread<A> {
/// The caller must **infallibly** consume any handles passed back
/// in an `Ok` result, else they are forever lost, since this method
/// advances the state machine and consumes the handle.
pub unsafe fn try_schedule(&mut self, core_id: usize) -> Result<ScheduleAction, ScheduleError> {
pub unsafe fn try_schedule(&mut self, core_id: u32) -> Result<ScheduleAction, ScheduleError> {
match &self.state {
State::Terminated => Err(ScheduleError::Terminated),
State::Running(core) => Err(ScheduleError::AlreadyRunning(*core)),
Expand Down Expand Up @@ -239,7 +239,7 @@ impl<A: Arch> Thread<A> {
///
/// The thread must already be running on the given core,
/// else an error is returned.
pub fn try_pause(&mut self, core_id: usize) -> Result<(), PauseError> {
pub fn try_pause(&mut self, core_id: u32) -> Result<(), PauseError> {
match &self.state {
State::Terminated => Err(PauseError::Terminated),
State::Running(core) => {
Expand All @@ -263,7 +263,7 @@ impl<A: Arch> Thread<A> {
#[expect(clippy::needless_pass_by_value)]
pub fn try_system_call(
&mut self,
core_id: usize,
core_id: u32,
request: SystemCallRequest,
) -> Result<SystemCallAction, PauseError> {
match &self.state {
Expand Down Expand Up @@ -350,14 +350,14 @@ pub enum SystemCallAction {
#[derive(Debug)]
pub enum ScheduleError {
/// The thread is already running on the given core.
AlreadyRunning(usize),
AlreadyRunning(u32),
/// The thread is terminated.
Terminated,
/// The thread needs an explicit response to an application request
/// and cannot be scheduled normally.
AwaitingResponse,
/// The thread is paused on another core.
Paused(usize),
Paused(u32),
/// The thread is stopped.
Stopped,
}
Expand All @@ -369,7 +369,7 @@ pub enum PauseError {
/// not terminated**.
NotRunning,
/// The thread is allocated to another core.
WrongCore(usize),
WrongCore(u32),
/// The thread is terminated.
Terminated,
}
Expand Down
4 changes: 4 additions & 0 deletions oro-sync/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@ homepage = "https://oro.sh"
repository = "https://github.com/oro-os/kernel"
license = "MPL-2.0"

[features]
default = ["reentrant_mutex"]
reentrant_mutex = []

[lib]
doctest = false

Expand Down
139 changes: 139 additions & 0 deletions oro-sync/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -247,3 +247,142 @@ impl<T: Send + 'static> DerefMut for TicketMutexGuard<'_, T> {
unsafe { &mut *self.lock.value.get() }
}
}

#[doc(hidden)]
#[cfg(feature = "reentrant_mutex")]
mod reentrant {
    use core::{
        cell::UnsafeCell,
        sync::atomic::{
            AtomicU64,
            Ordering::{AcqRel, Acquire, Relaxed, Release},
        },
    };

    use super::Lock;

    unsafe extern "C" {
        /// Returns the current core ID.
        ///
        /// # Safety
        /// This function must always return a valid core ID for the currently
        /// running execution context. If this value has not yet been set up,
        /// reentrant mutexes should NOT be locked (though they can be created).
        ///
        /// The value returned by this function MUST always be the same for the same
        /// core.
        unsafe fn oro_sync_current_core_id() -> u32;
    }

    /// A reentrant mutex implementation.
    ///
    /// This mutex allows the same core to lock the mutex multiple times.
    ///
    /// **NOTE:** This implementation spins (and does not lock) if the refcount
    /// reaches `u32::MAX`. This is usually not a problem.
    ///
    /// **NOTE(review):** nested locks on the same core yield multiple live
    /// guards, each of which can hand out `&mut T` via `DerefMut`; callers
    /// must not use two guards' mutable borrows simultaneously.
    pub struct ReentrantMutex<T: Send + 'static> {
        /// The inner value.
        inner: UnsafeCell<T>,
        /// The lock state.
        ///
        /// The upper 32 bits are the core ID of the lock holder, and the lower
        /// 32 bits are the lock count. A value of `0` means "unlocked".
        lock: AtomicU64,
    }

    // SAFETY: Access to `inner` is serialized by `lock`: only the core whose
    // SAFETY: ID is recorded in the upper 32 bits can create guards, and the
    // SAFETY: value is only reachable through a guard.
    unsafe impl<T: Send + 'static> Sync for ReentrantMutex<T> {}

    impl<T: Send + 'static> ReentrantMutex<T> {
        /// Constructs a new reentrant mutex.
        pub const fn new(inner: T) -> Self {
            Self {
                inner: UnsafeCell::new(inner),
                lock: AtomicU64::new(0),
            }
        }
    }

    impl<T: Send + 'static> Lock for ReentrantMutex<T> {
        /// The lock guard type used by the lock implementation.
        type Guard<'a> = ReentrantMutexGuard<'a, Self::Target>;
        /// The target type of value being guarded.
        type Target = T;

        fn lock(&self) -> Self::Guard<'_> {
            // SAFETY: The safety requirements for this function are offloaded to the
            // SAFETY: implementation; it's marked unsafe as a requirement by Rust.
            let core_id = unsafe { oro_sync_current_core_id() };

            loop {
                let current = self.lock.load(Acquire);
                let current_core = (current >> 32) as u32;
                let current_count = (current & 0xFFFF_FFFF) as u32;

                // We may take the lock if it's free, or if we already hold it
                // and the count has room. The `current_count < u32::MAX`
                // guard makes a saturated lock spin here (as documented)
                // instead of overflowing the count.
                let may_take =
                    current == 0 || (current_core == core_id && current_count < u32::MAX);

                // `AcqRel` on success: `Acquire` to synchronize with the
                // releasing CAS in the guard's `Drop`, `Release` to publish
                // the new holder/count.
                if may_take
                    && self
                        .lock
                        .compare_exchange_weak(
                            current,
                            (u64::from(core_id) << 32) | u64::from(current_count + 1),
                            AcqRel,
                            Relaxed,
                        )
                        .is_ok()
                {
                    return ReentrantMutexGuard { inner: self };
                }
            }
        }
    }

    /// A guard for a reentrant mutex.
    ///
    /// Dropping the guard decrements the lock count; the mutex is fully
    /// released once every guard taken by the holding core has been dropped.
    pub struct ReentrantMutexGuard<'a, T: Send + 'static> {
        /// The mutex this guard was taken from.
        inner: &'a ReentrantMutex<T>,
    }

    impl<T: Send + 'static> core::ops::Deref for ReentrantMutexGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &Self::Target {
            // SAFETY: The guard is only created if the lock is held.
            unsafe { &*self.inner.inner.get() }
        }
    }

    impl<T: Send + 'static> core::ops::DerefMut for ReentrantMutexGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut Self::Target {
            // SAFETY: The guard is only created if the lock is held.
            unsafe { &mut *self.inner.inner.get() }
        }
    }

    impl<T: Send + 'static> Drop for ReentrantMutexGuard<'_, T> {
        fn drop(&mut self) {
            loop {
                let current = self.inner.lock.load(Relaxed);
                let current_count = current & 0xFFFF_FFFF;

                debug_assert_eq!(
                    (current >> 32) as u32,
                    // SAFETY: A guard exists, so core IDs have been set up.
                    unsafe { oro_sync_current_core_id() },
                    "reentrant mutex guard dropped on a core other than the one that locked it"
                );

                // A count of 1 means this is the outermost guard; clear the
                // entire word (including the holder's core ID) to unlock.
                // Otherwise just decrement the count, leaving the holder bits.
                if self
                    .inner
                    .lock
                    .compare_exchange(
                        current,
                        if current_count == 1 { 0 } else { current - 1 },
                        Release,
                        Relaxed,
                    )
                    .is_ok()
                {
                    break;
                }
            }
        }
    }
}

#[cfg(feature = "reentrant_mutex")]
pub use reentrant::*;

0 comments on commit 6ee585d

Please sign in to comment.