Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 20 additions & 15 deletions examples/armv4t_multicore/emu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,11 @@ pub enum Event {
WatchRead(u32),
}

/// Per-core execution mode requested by the GDB client for the next resume.
// `PartialEq` is derived so modes can be compared directly
// (e.g. `mode == &ExecMode::Step` when scanning `exec_mode`).
#[derive(PartialEq)]
pub enum ExecMode {
    /// Single-step a single instruction, then report `DoneStep`.
    Step,
    /// Run freely until an event (breakpoint, watchpoint, etc.) occurs.
    Continue,
    /// Keep the core halted during the next resume — used to implement
    /// GDB's "scheduler locking" (cores in `Stop` are skipped by `step()`).
    Stop,
}

/// incredibly barebones armv4t-based emulator
Expand Down Expand Up @@ -187,6 +189,10 @@ impl Emu {
let mut evt = None;

for id in [CpuId::Cpu, CpuId::Cop].iter().copied() {
if matches!(self.exec_mode.get(&id), Some(ExecMode::Stop)) {
continue;
}

if let Some(event) = self.step_core(id) {
if evt.is_none() {
evt = Some((event, id));
Expand All @@ -198,26 +204,25 @@ impl Emu {
}

pub fn run(&mut self, mut poll_incoming_data: impl FnMut() -> bool) -> RunEvent {
// the underlying armv4t_multicore emulator runs both cores in lock step, so
// when GDB requests a specific core to single-step, all we need to do is jot
// down that we want to single-step the system, as there is no way to
// single-step a single core while the other runs.
// The underlying armv4t_multicore emulator cycles all cores in lock-step.
//
// In more complex emulators / implementations, this simplification is _not_
// valid, and you should track which specific TID the GDB client requested to be
// single-stepped, and resume that thread appropriately.

let should_single_step = matches!(
self.exec_mode
.get(&CpuId::Cpu)
.or_else(|| self.exec_mode.get(&CpuId::Cop)),
Some(&ExecMode::Step)
);
// Inside `self.step()`, we iterate through all cores and only invoke
// `step_core` if that core's `ExecMode` is not `Stop`.

let should_single_step = self.exec_mode.values().any(|mode| mode == &ExecMode::Step);

match should_single_step {
true => match self.step() {
Some((event, id)) => RunEvent::Event(event, id),
None => RunEvent::Event(Event::DoneStep, CpuId::Cpu),
None => {
let stepping_core = self
.exec_mode
.iter()
.find(|&(_, mode)| mode == &ExecMode::Step)
.map(|(id, _)| *id)
.unwrap_or(CpuId::Cpu);
RunEvent::Event(Event::DoneStep, stepping_core)
}
},
false => {
let mut cycles = 0;
Expand Down
16 changes: 16 additions & 0 deletions examples/armv4t_multicore/gdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,13 @@ impl MultiThreadResume for Emu {

Ok(())
}

/// Opt in to the scheduler-locking protocol extension.
///
/// Returning `Some(self)` advertises that `Emu` implements
/// `MultiThreadSchedulerLocking`, letting the stub freeze unmentioned
/// cores instead of implicitly continuing them.
#[inline(always)]
fn support_scheduler_locking(
    &mut self,
) -> Option<target::ext::base::multithread::MultiThreadSchedulerLockingOps<'_, Self>> {
    Some(self)
}
}

impl target::ext::base::multithread::MultiThreadSingleStep for Emu {
Expand Down Expand Up @@ -294,6 +301,15 @@ impl target::ext::thread_extra_info::ThreadExtraInfo for Emu {
}
}

impl target::ext::base::multithread::MultiThreadSchedulerLocking for Emu {
    /// Freeze every core that was not given an explicit resume action.
    ///
    /// Each core without an existing `exec_mode` entry is defaulted to
    /// `ExecMode::Stop`, so it stays halted during the upcoming resume —
    /// implementing GDB's "scheduler locking" behavior. Cores that already
    /// have an entry (e.g. `Step` or `Continue`) are left untouched.
    fn set_resume_action_scheduler_lock(&mut self) -> Result<(), Self::Error> {
        let all_cores = [CpuId::Cpu, CpuId::Cop];
        all_cores.iter().copied().for_each(|core| {
            // `entry(..).or_insert(..)` only writes when no action was set.
            self.exec_mode.entry(core).or_insert(ExecMode::Stop);
        });
        Ok(())
    }
}

/// Copy all bytes of `data` to `buf`.
/// Return the size of data copied.
pub fn copy_to_buf(data: &[u8], buf: &mut [u8]) -> usize {
Expand Down
17 changes: 17 additions & 0 deletions src/stub/core_impl/resume.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,13 @@ impl<T: Target, C: Connection> GdbStubImpl<T, C> {
) -> Result<(), Error<T::Error, C::Error>> {
ops.clear_resume_actions().map_err(Error::TargetError)?;

// Track whether the packet contains a wildcard/default continue action
// (e.g., `c` or `c:-1`).
//
// Presence of this action implies "Scheduler Locking" is OFF.
// Absence implies "Scheduler Locking" is ON.
let mut has_wildcard_continue = false;

for action in actions.iter() {
use crate::protocol::commands::_vCont::VContKind;

Expand All @@ -185,6 +192,7 @@ impl<T: Target, C: Connection> GdbStubImpl<T, C> {
None | Some(SpecificIdKind::All) => {
// Target API contract specifies that the default
// resume action for all threads is continue.
has_wildcard_continue = true;
}
Some(SpecificIdKind::WithId(tid)) => ops
.set_resume_action_continue(tid, signal)
Expand Down Expand Up @@ -251,6 +259,15 @@ impl<T: Target, C: Connection> GdbStubImpl<T, C> {
}
}

if !has_wildcard_continue {
let Some(locking_ops) = ops.support_scheduler_locking() else {
return Err(Error::MissingMultiThreadSchedulerLocking);
};
locking_ops
.set_resume_action_scheduler_lock()
.map_err(Error::TargetError)?;
}

ops.resume().map_err(Error::TargetError)
}

Expand Down
2 changes: 2 additions & 0 deletions src/stub/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ pub(crate) enum InternalError<T, C> {
MissingCurrentActivePidImpl,
TracepointFeatureUnimplemented(u8),
TracepointUnsupportedSourceEnumeration,
MissingMultiThreadSchedulerLocking,

// Internal - A non-fatal error occurred (with errno-style error code)
//
Expand Down Expand Up @@ -147,6 +148,7 @@ where
MissingCurrentActivePidImpl => write!(f, "GDB client attempted to attach to a new process, but the target has not implemented support for `ExtendedMode::support_current_active_pid`"),
TracepointFeatureUnimplemented(feat) => write!(f, "GDB client sent us a tracepoint packet using feature {}, but `gdbstub` doesn't implement it. If this is something you require, please file an issue at https://github.com/daniel5151/gdbstub/issues", *feat as char),
TracepointUnsupportedSourceEnumeration => write!(f, "The target doesn't support the gdbstub TracepointSource extension, but attempted to transition to enumerating tracepoint sources"),
MissingMultiThreadSchedulerLocking => write!(f, "GDB requested Scheduler Locking, but the Target does not implement the `MultiThreadSchedulerLocking` IDET"),

NonFatalError(_) => write!(f, "Internal non-fatal error. You should never see this! Please file an issue if you do!"),
}
Expand Down
52 changes: 49 additions & 3 deletions src/target/ext/base/multithread.rs
Original file line number Diff line number Diff line change
Expand Up @@ -134,9 +134,21 @@ pub trait MultiThreadResume: Target {
/// GDB client had specified using any of the `set_resume_action_XXX`
/// methods.
///
/// Any thread that wasn't explicitly resumed by a `set_resume_action_XXX`
/// method should be resumed as though it was resumed with
/// `set_resume_action_continue`.
/// # Default Resume Behavior
///
/// By default, any thread that wasn't explicitly resumed by a
/// `set_resume_action_XXX` method should be resumed as though it was
/// resumed with `set_resume_action_continue`.
///
/// **However**, if [`support_scheduler_locking`] is implemented and
/// [`set_resume_action_scheduler_lock`] has been called for the current
/// resume cycle, this default changes: **unmentioned threads must remain
/// stopped.**
///
/// [`support_scheduler_locking`]: Self::support_scheduler_locking
/// [`set_resume_action_scheduler_lock`]: MultiThreadSchedulerLocking::set_resume_action_scheduler_lock
///
/// # Protocol Extensions
///
/// A basic target implementation only needs to implement support for
/// `set_resume_action_continue`, with all other resume actions requiring
Expand All @@ -146,14 +158,17 @@ pub trait MultiThreadResume: Target {
/// ----------------------------|------------------------------
/// Optimized [Single Stepping] | See [`support_single_step()`]
/// Optimized [Range Stepping] | See [`support_range_step()`]
/// [Scheduler Locking] | See [`support_scheduler_locking()`]
/// "Stop" | Used in "Non-Stop" mode \*
///
/// \* "Non-Stop" mode is currently unimplemented in `gdbstub`
///
/// [Single stepping]: https://sourceware.org/gdb/current/onlinedocs/gdb/Continuing-and-Stepping.html#index-stepi
/// [Range Stepping]: https://sourceware.org/gdb/current/onlinedocs/gdb/Continuing-and-Stepping.html#range-stepping
/// [Scheduler Locking]: https://sourceware.org/gdb/current/onlinedocs/gdb#index-scheduler-locking-mode
/// [`support_single_step()`]: Self::support_single_step
/// [`support_range_step()`]: Self::support_range_step
/// [`support_scheduler_locking()`]: Self::support_scheduler_locking
///
/// # Additional Considerations
///
Expand Down Expand Up @@ -233,6 +248,14 @@ pub trait MultiThreadResume: Target {
) -> Option<super::reverse_exec::ReverseContOps<'_, Tid, Self>> {
None
}

/// Support for [scheduler locking].
///
/// [scheduler locking]: https://sourceware.org/gdb/current/onlinedocs/gdb#index-scheduler-locking-mode
#[inline(always)]
fn support_scheduler_locking(&mut self) -> Option<MultiThreadSchedulerLockingOps<'_, Self>> {
None
}
}

define_ext!(MultiThreadResumeOps, MultiThreadResume);
Expand Down Expand Up @@ -290,3 +313,26 @@ pub trait MultiThreadRangeStepping: Target + MultiThreadResume {
}

define_ext!(MultiThreadRangeSteppingOps, MultiThreadRangeStepping);

/// Target Extension - support for GDB's "Scheduler Locking" mode.
/// See [`MultiThreadResume::support_scheduler_locking`].
pub trait MultiThreadSchedulerLocking: Target + MultiThreadResume {
    /// Configure the target to enable "Scheduler Locking" for the upcoming
    /// resume.
    ///
    /// This method is invoked when the GDB client expects only a specific set
    /// of threads to run, while all other threads remain frozen. This behavior
    /// is typically toggled in GDB using the `set scheduler-locking on`
    /// command.
    ///
    /// When this method is called, the implementation must ensure that any
    /// threads not explicitly resumed via previous `set_resume_action_...`
    /// calls **remain stopped**.
    ///
    /// This prevents any "implicit continue" behavior for unmentioned threads,
    /// satisfying GDB's expectation that only the designated threads will
    /// advance during the next resume.
    ///
    /// # Errors
    ///
    /// Returns the target-defined [`Self::Error`] if the resume actions could
    /// not be recorded; the error is propagated back through the stub.
    fn set_resume_action_scheduler_lock(&mut self) -> Result<(), Self::Error>;
}

// Generates the `MultiThreadSchedulerLockingOps` type alias used by
// `support_scheduler_locking` to hand out a reference to this IDET.
define_ext!(MultiThreadSchedulerLockingOps, MultiThreadSchedulerLocking);
Loading