@@ -53,8 +53,16 @@ struct MapResponse {
 }
 
 /// Assumption: all are page-aligned
+/// # Safety
+/// This function modifies pages backing a virtual memory range, which is inherently unsafe w.r.t.
+/// the Rust memory model.
+/// When using this function, note:
+/// - No locking is performed before touching page table data structures,
+///   so do not use it concurrently with any other page table operations.
+/// - TLB invalidation is not performed;
+///   if any of the ranges being mapped were previously mapped, TLB invalidation may need to be performed afterwards.
 pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
-    let mut pml4_base: u64 = 0;
+    let mut pml4_base: u64;
     unsafe {
         asm!("mov {}, cr3", out(reg) pml4_base);
     }
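For illustration, a minimal usage sketch that follows the new `# Safety` notes above (the constants here are hypothetical, not from this codebase):

    // Hypothetical constants; map a page-aligned range, then flush because the
    // range may have been mapped before and map_region does no TLB invalidation.
    unsafe {
        map_region(EXAMPLE_PHYS_BASE, EXAMPLE_VIRT_BASE as *mut u8, EXAMPLE_LEN);
    }
    flush_tlb();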
@@ -71,12 +79,18 @@ pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
         .map(|r| unsafe { alloc_pte_if_needed(r) })
         .flat_map(modify_ptes::<20, 12>)
         .map(|r| map_normal(phys_base, virt_base, r))
-        .collect::<()>();
+        .for_each(drop);
 }
 
 #[allow(unused)]
 /// This function is not presently used for anything, but is useful
 /// for debugging
+/// # Safety
+/// This function traverses page table data structures, and should not be called concurrently
+/// with any other operations that modify the page table.
+/// # Panics
+/// This function will panic if:
+/// - A page map request resolves to multiple page table entries
 pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
     let mut pml4_base: u64 = 0;
     unsafe {
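For context on the `.collect::<()>()` to `.for_each(drop)` change: the chain is lazy and is run only for its side effects, and `for_each(drop)` is the conventional way to drive it to completion. A standalone sketch of the idiom:

    // Nothing runs until the iterator is consumed; `for_each(drop)` consumes every
    // item (here `()`) and discards it, forcing the side effects to happen.
    let mut entries_written = 0;
    (0..4).map(|_| entries_written += 1).for_each(drop);
    assert_eq!(entries_written, 4);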
@@ -105,11 +119,18 @@ pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
     if addrs.len() != 1 {
         panic!("impossible: 1 page map request resolved to multiple PTEs");
     }
-    return addrs[0];
+    addrs[0]
 }
 
 /// Allocate n contiguous physical pages and return the physical
 /// addresses of the pages in question.
+/// # Safety
+/// This function is not inherently unsafe but will likely become so in the future
+/// when a real physical page allocator is implemented.
+/// # Panics
+/// This function will panic if:
+/// - The Layout creation fails
+/// - Memory allocation fails
 pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
     // Currently, since all of main memory is idmap'd, we can just
     // allocate any appropriately aligned section of memory.
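The new `# Panics` notes correspond to the idmap-based strategy the in-function comments describe; a hedged sketch of what such a body might look like (an assumption, not necessarily the actual implementation):

    // Sketch only: take n page-aligned pages from the global allocator; with main
    // memory identity-mapped, the allocation's address can serve as the physical
    // address. OS_PAGE_SIZE is the page-size constant used elsewhere in this file.
    let layout = core::alloc::Layout::from_size_align(
        (n * OS_PAGE_SIZE) as usize,
        OS_PAGE_SIZE as usize,
    )
    .expect("invalid layout");                     // "The Layout creation fails"
    let ptr = unsafe { alloc::alloc::alloc(layout) };
    assert!(!ptr.is_null(), "allocation failed");  // "Memory allocation fails"
    ptr as u64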
@@ -125,8 +146,11 @@ pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
     }
 }
 
-pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
-    let mut pte: u64 = 0;
+/// # Safety
+/// This function traverses page table data structures, and should not be called concurrently
+/// with any other operations that modify the page table.
+unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
+    let mut pte: u64;
     unsafe {
         asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
     }
@@ -141,9 +165,12 @@ pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
     }
 }
 
-/// Page-mapping callback to allocate a next-level page table if necessary
-pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
-    let mut pte: u64 = 0;
+/// Page-mapping callback to allocate a next-level page table if necessary.
+/// # Safety
+/// This function modifies page table data structures, and should not be called concurrently
+/// with any other operations that modify the page table.
+unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
+    let mut pte: u64;
     unsafe {
         asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
     }
@@ -157,6 +184,9 @@ pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
     }
     let page_addr = unsafe { alloc_phys_pages(1) };
     unsafe { ptov(page_addr).write_bytes(0u8, OS_PAGE_SIZE as usize) };
+
+    #[allow(clippy::identity_op)]
+    #[allow(clippy::precedence)]
     let pte = page_addr |
         1 << 5 | // A - we don't track accesses at table level
         0 << 4 | // PCD - leave caching enabled
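A note on the two new allows: `<<` binds more tightly than `|` in Rust, so the flag expression already parses as intended (`clippy::precedence` merely dislikes the unparenthesized mix), and the `0 << 4` term exists only to document the PCD bit (`clippy::identity_op`). For the two bits visible in this hunk:

    // A (bit 5) set, PCD (bit 4) clear: these terms contribute 0x20 to the entry.
    assert_eq!((1u64 << 5) | (0 << 4), 0x20);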
@@ -178,6 +208,8 @@ pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
 ///
 /// TODO: support permissions; currently mapping is always RWX
 fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
+    #[allow(clippy::identity_op)]
+    #[allow(clippy::precedence)]
     let pte = (phys_base + (r.vmin as u64 - virt_base as u64)) |
         1 << 6 | // D - we don't presently track dirty state for anything
         1 << 5 | // A - we don't presently track access for anything
@@ -194,27 +226,27 @@ fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
 #[inline(always)]
 /// Utility function to extract an (inclusive on both ends) bit range
 /// from a quadword.
-fn bits<const high_bit: u8, const low_bit: u8>(x: u64) -> u64 {
-    (x & ((1 << (high_bit + 1)) - 1)) >> low_bit
+fn bits<const HIGH_BIT: u8, const LOW_BIT: u8>(x: u64) -> u64 {
+    (x & ((1 << (HIGH_BIT + 1)) - 1)) >> LOW_BIT
 }
 
-struct ModifyPteIterator<const high_bit: u8, const low_bit: u8> {
+struct ModifyPteIterator<const HIGH_BIT: u8, const LOW_BIT: u8> {
     request: MapRequest,
     n: u64,
 }
-impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_bit, low_bit> {
+impl<const HIGH_BIT: u8, const LOW_BIT: u8> Iterator for ModifyPteIterator<HIGH_BIT, LOW_BIT> {
     type Item = MapResponse;
     fn next(&mut self) -> Option<Self::Item> {
-        if (self.n << low_bit) >= self.request.len {
+        if (self.n << LOW_BIT) >= self.request.len {
             return None;
         }
         // next stage parameters
-        let next_vmin = self.request.vmin.wrapping_add((self.n << low_bit) as usize);
+        let next_vmin = self.request.vmin.wrapping_add((self.n << LOW_BIT) as usize);
         let entry_ptr = ptov(self.request.table_base)
-            .wrapping_add((bits::<high_bit, low_bit>(next_vmin as u64) << 3) as usize)
+            .wrapping_add((bits::<HIGH_BIT, LOW_BIT>(next_vmin as u64) << 3) as usize)
             as *mut u64;
-        let len_from_here = self.request.len - (self.n << low_bit);
-        let next_len = core::cmp::min(len_from_here, 1 << low_bit);
+        let len_from_here = self.request.len - (self.n << LOW_BIT);
+        let next_len = core::cmp::min(len_from_here, 1 << LOW_BIT);
 
         // update our state
         self.n += 1;
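The `HIGH_BIT`/`LOW_BIT` renames only satisfy Rust's upper-case naming convention for const generics; behaviour is unchanged. A worked example of `bits` with the `<20, 12>` instantiation that `map_region` uses for the last-level table index:

    // Mask off everything above bit 20, then shift out everything below bit 12:
    // for 0x0020_3000, bits 20..=12 are 0b0_0000_0011, i.e. page-table index 3.
    assert_eq!(bits::<20, 12>(0x0020_3000), 3);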
@@ -226,17 +258,17 @@ impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_
         })
     }
 }
-fn modify_ptes<const high_bit: u8, const low_bit: u8>(
+fn modify_ptes<const HIGH_BIT: u8, const LOW_BIT: u8>(
     r: MapRequest,
-) -> ModifyPteIterator<high_bit, low_bit> {
+) -> ModifyPteIterator<HIGH_BIT, LOW_BIT> {
     ModifyPteIterator { request: r, n: 0 }
 }
 
 pub fn flush_tlb() {
     // Currently this just always flips CR4.PGE back and forth to
     // trigger a tlb flush. We should use a faster approach where
     // available
-    let mut orig_cr4: u64 = 0;
+    let mut orig_cr4: u64;
     unsafe {
         asm!("mov {}, cr4", out(reg) orig_cr4);
     }
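The comment above describes toggling CR4.PGE (bit 7); as a hedged sketch, the remainder of the function presumably does something along these lines (the actual body is outside this hunk and may differ):

    unsafe {
        // Flipping PGE invalidates all TLB entries, including global ones.
        asm!("mov cr4, {}", in(reg) orig_cr4 ^ (1 << 7));
        // Restore the original CR4 value.
        asm!("mov cr4, {}", in(reg) orig_cr4);
    }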