
Commit 8ef120b

More clippy fixes for hyperlight_guest_bin

Signed-off-by: Mark Rossett <[email protected]>
1 parent cf38285 commit 8ef120b

2 files changed: +58, -31 lines


src/hyperlight_guest_bin/src/exceptions/handler.rs

Lines changed: 6 additions & 11 deletions
@@ -21,8 +21,6 @@ use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
 use hyperlight_common::outb::Exception;
 use hyperlight_guest::exit::abort_with_code_and_message;
 
-use crate::paging;
-
 /// See AMD64 Architecture Programmer's Manual, Volume 2
 /// §8.9.3 Interrupt Stack Frame, pp. 283--284
 /// Figure 8-14: Long-Mode Stack After Interrupt---Same Privilege,
@@ -56,9 +54,9 @@ const _: () = assert!(size_of::<Context>() == 152 + 512);
 
 // TODO: This will eventually need to end up in a per-thread context,
 // when there are threads.
-pub static handlers: [core::sync::atomic::AtomicU64; 31] =
+pub static HANDLERS: [core::sync::atomic::AtomicU64; 31] =
     [const { core::sync::atomic::AtomicU64::new(0) }; 31];
-type handler_t = fn(n: u64, info: *mut ExceptionInfo, ctx: *mut Context, pf_addr: u64) -> bool;
+pub type HandlerT = fn(n: u64, info: *mut ExceptionInfo, ctx: *mut Context, pf_addr: u64) -> bool;
 
 /// Exception handler
 #[unsafe(no_mangle)]
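
Note: the rename from `handlers`/`handler_t` to `HANDLERS`/`HandlerT` matches rustc's default naming warnings (`non_upper_case_globals` for statics, `non_camel_case_types` for type aliases). Below is a minimal, hypothetical sketch of the convention; the array length and handler signature are shortened for illustration and are not the ones used in the guest.

    #![deny(non_upper_case_globals, non_camel_case_types)]

    use core::sync::atomic::{AtomicU64, Ordering};

    // Statics use SCREAMING_SNAKE_CASE; a lower-case `handlers` would warn.
    pub static HANDLERS: [AtomicU64; 4] = [const { AtomicU64::new(0) }; 4];

    // Type aliases use CamelCase; `handler_t` would warn.
    pub type HandlerT = fn(n: u64) -> bool;

    fn main() {
        HANDLERS[0].store(1, Ordering::Release);
        let f: HandlerT = |n| n > 0;
        assert!(f(HANDLERS[0].load(Ordering::Acquire)));
    }
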
@@ -89,15 +87,12 @@ pub extern "C" fn hl_exception_handler(
     // vectors (0-31)
     if exception_number < 31 {
         let handler =
-            handlers[exception_number as usize].load(core::sync::atomic::Ordering::Acquire);
+            HANDLERS[exception_number as usize].load(core::sync::atomic::Ordering::Acquire);
         if handler != 0
             && unsafe {
-                core::mem::transmute::<_, handler_t>(handler)(
-                    exception_number,
-                    exn_info,
-                    ctx,
-                    page_fault_address,
-                )
+                core::mem::transmute::<u64, fn(u64, *mut ExceptionInfo, *mut Context, u64) -> bool>(
+                    handler,
+                )(exception_number, exn_info, ctx, page_fault_address)
             }
         {
             return;
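
Note: the handler call now spells out both transmute type parameters instead of `::<_, handler_t>`, which is what clippy asks for when a transmute's source type would otherwise be inferred (the `missing_transmute_annotations` lint, if I read the change right). A standalone sketch of the same store/load/call round trip, assuming a 64-bit target; the handler name and one-argument signature here are made up for illustration.

    use core::sync::atomic::{AtomicU64, Ordering};

    static HANDLER: AtomicU64 = AtomicU64::new(0);

    fn demo_handler(n: u64) -> bool {
        println!("handler invoked for vector {n}");
        true
    }

    fn main() {
        // Register the handler by storing its address, as HANDLERS does.
        HANDLER.store(demo_handler as usize as u64, Ordering::Release);

        let raw = HANDLER.load(Ordering::Acquire);
        if raw != 0 {
            // Both type parameters are written out; `transmute::<_, ...>`
            // would leave the source type to inference, which the lint flags.
            let f = unsafe { core::mem::transmute::<u64, fn(u64) -> bool>(raw) };
            assert!(f(14));
        }
    }
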

src/hyperlight_guest_bin/src/paging.rs

Lines changed: 52 additions & 20 deletions
@@ -53,8 +53,16 @@ struct MapResponse {
 }
 
 /// Assumption: all are page-aligned
+/// # Safety
+/// This function modifies pages backing a virtual memory range, which is inherently unsafe
+/// w.r.t. the Rust memory model.
+/// When using this function, note:
+/// - No locking is performed before touching page table data structures, so do not use it
+///   concurrently with any other page table operations.
+/// - TLB invalidation is not performed; unless only previously-unmapped ranges are being
+///   mapped, TLB invalidation may need to be performed afterwards.
 pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
-    let mut pml4_base: u64 = 0;
+    let mut pml4_base: u64;
     unsafe {
         asm!("mov {}, cr3", out(reg) pml4_base);
     }
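
Note: clippy's `missing_safety_doc` lint expects every public `unsafe fn` to carry a `# Safety` section like the one added above. A minimal sketch of the shape it wants; the function itself is hypothetical.

    #![deny(clippy::missing_safety_doc)]

    /// Writes `val` through `ptr`.
    ///
    /// # Safety
    /// `ptr` must be non-null, properly aligned, and valid for writes; without
    /// this section the lint fires on the public unsafe function below.
    pub unsafe fn write_u64(ptr: *mut u64, val: u64) {
        unsafe { ptr.write(val) }
    }

    fn main() {
        let mut x = 0u64;
        unsafe { write_u64(&mut x, 42) };
        assert_eq!(x, 42);
    }
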
@@ -71,12 +79,18 @@ pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
         .map(|r| unsafe { alloc_pte_if_needed(r) })
         .flat_map(modify_ptes::<20, 12>)
         .map(|r| map_normal(phys_base, virt_base, r))
-        .collect::<()>();
+        .for_each(drop);
 }
 
 #[allow(unused)]
 /// This function is not presently used for anything, but is useful
 /// for debugging
+/// # Safety
+/// This function traverses page table data structures, and should not be called concurrently
+/// with any other operations that modify the page table.
+/// # Panics
+/// This function will panic if:
+/// - A page map request resolves to multiple page table entries
 pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
     let mut pml4_base: u64 = 0;
     unsafe {
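
Note: the `.collect::<()>()` at the end of the mapping pipeline becomes `.for_each(drop)`. Both force the side-effecting iterator to run to completion, but the latter says so directly instead of leaning on the `FromIterator` impl for the unit type. A small sketch of the two spellings, using a made-up pipeline.

    fn main() {
        let ranges = [0u64, 1, 2];

        // Old spelling: collect an iterator of `()` into `()` just to drive it.
        ranges.iter().map(|r| println!("mapping range {r}")).collect::<()>();

        // New spelling: consume each `()` item explicitly.
        ranges.iter().map(|r| println!("mapping range {r}")).for_each(drop);
    }
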
@@ -105,11 +119,18 @@ pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
     if addrs.len() != 1 {
         panic!("impossible: 1 page map request resolved to multiple PTEs");
     }
-    return addrs[0];
+    addrs[0]
 }
 
 /// Allocate n contiguous physical pages and return the physical
 /// addresses of the pages in question.
+/// # Safety
+/// This function is not inherently unsafe but will likely become so in the future
+/// when a real physical page allocator is implemented.
+/// # Panics
+/// This function will panic if:
+/// - The Layout creation fails
+/// - Memory allocation fails
 pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
     // Currently, since all of main memory is idmap'd, we can just
     // allocate any appropriately aligned section of memory.
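
Note: dropping `return` from the tail expression of `dbg_print_address_pte` is the fix for clippy's `needless_return` lint. A tiny sketch with a hypothetical helper.

    #![deny(clippy::needless_return)]

    fn only_entry(addrs: &[u64]) -> u64 {
        if addrs.len() != 1 {
            panic!("impossible: 1 page map request resolved to multiple PTEs");
        }
        // Writing `return addrs[0];` here would trip the lint; the bare tail
        // expression is the idiomatic form.
        addrs[0]
    }

    fn main() {
        assert_eq!(only_entry(&[0xdead_b000]), 0xdead_b000);
    }
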
@@ -125,8 +146,11 @@ pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
     }
 }
 
-pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
-    let mut pte: u64 = 0;
+/// # Safety
+/// This function traverses page table data structures, and should not be called concurrently
+/// with any other operations that modify the page table.
+unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
+    let mut pte: u64;
     unsafe {
         asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
     }
@@ -141,9 +165,12 @@ pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
     }
 }
 
-/// Page-mapping callback to allocate a next-level page table if necessary
-pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
-    let mut pte: u64 = 0;
+/// Page-mapping callback to allocate a next-level page table if necessary.
+/// # Safety
+/// This function modifies page table data structures, and should not be called concurrently
+/// with any other operations that modify the page table.
+unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
+    let mut pte: u64;
     unsafe {
         asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
     }
@@ -157,6 +184,9 @@ pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
     }
     let page_addr = unsafe { alloc_phys_pages(1) };
     unsafe { ptov(page_addr).write_bytes(0u8, OS_PAGE_SIZE as usize) };
+
+    #[allow(clippy::identity_op)]
+    #[allow(clippy::precedence)]
     let pte = page_addr |
         1 << 5 | // A - we don't track accesses at table level
         0 << 4 | // PCD - leave caching enabled
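
Note: the two `#[allow]`s keep the flag-by-flag PTE expression intact. The `0 << 4` term trips `clippy::identity_op` and the unparenthesized mix of `<<` and `|` trips `clippy::precedence`, but spelling out every bit, including the zero ones, documents the entry layout, so the commit silences the lints rather than reshaping the expression. A reduced sketch of the pattern with only a few of the flag bits.

    fn main() {
        let page_addr: u64 = 0x7000;

        #[allow(clippy::identity_op)] // keep the explicit `0 << 4` for symmetry
        #[allow(clippy::precedence)] // `<<` binds tighter than `|`, as intended
        let pte = page_addr |
            1 << 5 | // A   - accessed
            0 << 4 | // PCD - leave caching enabled
            1 << 1 | // R/W - writable
            1; //       P   - present
        assert_eq!(pte, 0x7023);
    }
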
@@ -178,6 +208,8 @@ pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
 ///
 /// TODO: support permissions; currently mapping is always RWX
 fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
+    #[allow(clippy::identity_op)]
+    #[allow(clippy::precedence)]
     let pte = (phys_base + (r.vmin as u64 - virt_base as u64)) |
         1 << 6 | // D - we don't presently track dirty state for anything
         1 << 5 | // A - we don't presently track access for anything
@@ -194,27 +226,27 @@ fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
 #[inline(always)]
 /// Utility function to extract an (inclusive on both ends) bit range
 /// from a quadword.
-fn bits<const high_bit: u8, const low_bit: u8>(x: u64) -> u64 {
-    (x & ((1 << (high_bit + 1)) - 1)) >> low_bit
+fn bits<const HIGH_BIT: u8, const LOW_BIT: u8>(x: u64) -> u64 {
+    (x & ((1 << (HIGH_BIT + 1)) - 1)) >> LOW_BIT
 }
 
-struct ModifyPteIterator<const high_bit: u8, const low_bit: u8> {
+struct ModifyPteIterator<const HIGH_BIT: u8, const LOW_BIT: u8> {
     request: MapRequest,
     n: u64,
 }
-impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_bit, low_bit> {
+impl<const HIGH_BIT: u8, const LOW_BIT: u8> Iterator for ModifyPteIterator<HIGH_BIT, LOW_BIT> {
     type Item = MapResponse;
     fn next(&mut self) -> Option<Self::Item> {
-        if (self.n << low_bit) >= self.request.len {
+        if (self.n << LOW_BIT) >= self.request.len {
             return None;
         }
         // next stage parameters
-        let next_vmin = self.request.vmin.wrapping_add((self.n << low_bit) as usize);
+        let next_vmin = self.request.vmin.wrapping_add((self.n << LOW_BIT) as usize);
         let entry_ptr = ptov(self.request.table_base)
-            .wrapping_add((bits::<high_bit, low_bit>(next_vmin as u64) << 3) as usize)
+            .wrapping_add((bits::<HIGH_BIT, LOW_BIT>(next_vmin as u64) << 3) as usize)
             as *mut u64;
-        let len_from_here = self.request.len - (self.n << low_bit);
-        let next_len = core::cmp::min(len_from_here, 1 << low_bit);
+        let len_from_here = self.request.len - (self.n << LOW_BIT);
+        let next_len = core::cmp::min(len_from_here, 1 << LOW_BIT);
 
         // update our state
         self.n += 1;
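
Note: const generic parameters follow the same naming rule as constants, so `high_bit`/`low_bit` drew rustc's `non_upper_case_globals` warning; this hunk renames them everywhere they appear. The `bits` helper from the diff, with a quick check of what it extracts.

    // Extract an (inclusive on both ends) bit range from a quadword, as in the
    // hunk above; `HIGH_BIT`/`LOW_BIT` satisfy the naming warning where
    // `high_bit`/`low_bit` did not.
    fn bits<const HIGH_BIT: u8, const LOW_BIT: u8>(x: u64) -> u64 {
        (x & ((1 << (HIGH_BIT + 1)) - 1)) >> LOW_BIT
    }

    fn main() {
        // Bits 20..=12 of this address select page-table index 0x123.
        assert_eq!(bits::<20, 12>(0x0012_3456), 0x123);
    }
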
@@ -226,17 +258,17 @@ impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_
         })
     }
 }
-fn modify_ptes<const high_bit: u8, const low_bit: u8>(
+fn modify_ptes<const HIGH_BIT: u8, const LOW_BIT: u8>(
     r: MapRequest,
-) -> ModifyPteIterator<high_bit, low_bit> {
+) -> ModifyPteIterator<HIGH_BIT, LOW_BIT> {
     ModifyPteIterator { request: r, n: 0 }
 }
 
 pub fn flush_tlb() {
     // Currently this just always flips CR4.PGE back and forth to
     // trigger a tlb flush. We should use a faster approach where
     // available
-    let mut orig_cr4: u64 = 0;
+    let mut orig_cr4: u64;
     unsafe {
         asm!("mov {}, cr4", out(reg) orig_cr4);
     }
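
Note: this hunk, like the earlier ones touching `pml4_base` and `pte`, drops the dummy `= 0` initializers. An `out(reg)` operand counts as the first write to the local, so the compiler's initialization checking is satisfied without it and the zero was dead. A minimal, x86-64-only sketch of the pattern.

    use core::arch::asm;

    fn main() {
        // Declared but not initialized in Rust; the asm out operand performs
        // the only write, just as `mov {}, cr3` / `mov {}, cr4` do in the diff.
        let mut value: u64;
        unsafe {
            asm!("mov {}, 42", out(reg) value);
        }
        assert_eq!(value, 42);
    }
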
