Commit c41a05d
td-payload: add shadow pages for shared memory
Data in shared memory provided by the VMM is untrusted, so from a security perspective it must be copied into private memory before it is consumed. A private shadow is added to `SharedMemory` to give an easier, functionally safe way to use the untrusted data: the shadow is allocated 1:1 with each shared memory request, and users can call `copy_to_private_shadow` directly to get a private copy without additional memory allocation/free operations.

Signed-off-by: Jiaqi Gao <[email protected]>
1 parent 9b4e454 commit c41a05d
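
The commit message describes the intended usage; below is a minimal caller sketch against the new API (hypothetical code, not part of the commit: `handle_vmm_message` and `parse_untrusted` are made up for illustration):

    // Hypothetical caller: consume VMM-provided data only via the
    // private shadow copy.
    use td_payload::mm::shared::SharedMemory;

    fn parse_untrusted(_data: &[u8]) {
        // Validate and parse the private copy here.
    }

    fn handle_vmm_message() -> Option<()> {
        // One 4K shared page plus its 1:1 private shadow page.
        let mut shared = SharedMemory::new(1)?;

        // ... hand the shared page to the VMM, which fills it in ...

        // Snapshot the shared page into the private shadow. The returned
        // slice points at private memory, so later VMM writes to the
        // shared page cannot affect what is validated below.
        let private = shared.copy_to_private_shadow();
        parse_untrusted(private);

        Some(())
        // Drop frees both the shared pages and the shadow pages.
    }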

File tree

1 file changed (+61, -19 lines)


td-payload/src/mm/shared.rs

@@ -9,31 +9,52 @@ use super::SIZE_4K;
 use crate::arch::shared::decrypt;
 
 static SHARED_MEMORY_ALLOCATOR: LockedHeap = LockedHeap::empty();
+static PRIVATE_SHADOW_ALLOCATOR: LockedHeap = LockedHeap::empty();
 
 pub fn init_shared_memory(start: u64, size: usize) {
+    let shadow_size = size / 2;
+    let shared_start = start + shadow_size as u64;
+    let shared_size = size - shadow_size;
+
     // Set the shared memory region to be shared
-    decrypt(start, size);
+    decrypt(shared_start, shared_size);
     // Initialize the shared memory allocator
     unsafe {
-        SHARED_MEMORY_ALLOCATOR.lock().init(start as *mut u8, size);
+        SHARED_MEMORY_ALLOCATOR
+            .lock()
+            .init(shared_start as *mut u8, shared_size);
+        PRIVATE_SHADOW_ALLOCATOR
+            .lock()
+            .init(start as *mut u8, shadow_size);
     }
 }
 
 pub struct SharedMemory {
     addr: usize,
+    shadow_addr: usize,
     size: usize,
 }
 
 impl SharedMemory {
     pub fn new(num_page: usize) -> Option<Self> {
         let addr = unsafe { alloc_shared_pages(num_page)? };
+        let shadow_addr = unsafe { alloc_private_shadow_pages(num_page)? };
 
         Some(Self {
             addr,
+            shadow_addr,
             size: num_page * SIZE_4K,
         })
     }
 
+    pub fn copy_to_private_shadow(&mut self) -> &[u8] {
+        let shadow =
+            unsafe { core::slice::from_raw_parts_mut(self.shadow_addr as *mut u8, self.size) };
+        shadow.copy_from_slice(self.as_bytes());
+
+        shadow
+    }
+
     pub fn as_bytes(&self) -> &[u8] {
         unsafe { core::slice::from_raw_parts(self.addr as *const u8, self.size) }
     }
@@ -45,24 +66,15 @@ impl SharedMemory {
 
 impl Drop for SharedMemory {
     fn drop(&mut self) {
+        unsafe { free_private_shadow_pages(self.shadow_addr, self.size / SIZE_4K) }
         unsafe { free_shared_pages(self.addr, self.size / SIZE_4K) }
     }
 }
 
 /// # Safety
 /// The caller needs to explicitly call the `free_shared_pages` function after use
 pub unsafe fn alloc_shared_pages(num: usize) -> Option<usize> {
-    let size = SIZE_4K.checked_mul(num)?;
-
-    let addr = SHARED_MEMORY_ALLOCATOR
-        .lock()
-        .allocate_first_fit(Layout::from_size_align(size, SIZE_4K).ok()?)
-        .map(|ptr| ptr.as_ptr() as usize)
-        .ok()?;
-
-    core::slice::from_raw_parts_mut(addr as *mut u8, size).fill(0);
-
-    Some(addr)
+    allocator_alloc(&SHARED_MEMORY_ALLOCATOR, num)
 }
 
 /// # Safety
@@ -74,16 +86,46 @@ pub unsafe fn alloc_shared_page() -> Option<usize> {
 /// # Safety
 /// The caller needs to ensure the correctness of the addr and page num
 pub unsafe fn free_shared_pages(addr: usize, num: usize) {
-    let size = SIZE_4K.checked_mul(num).expect("Invalid page num");
-
-    SHARED_MEMORY_ALLOCATOR.lock().deallocate(
-        NonNull::new(addr as *mut u8).unwrap(),
-        Layout::from_size_align(size, SIZE_4K).unwrap(),
-    );
+    allocator_free(&SHARED_MEMORY_ALLOCATOR, addr, num)
 }
 
 /// # Safety
 /// The caller needs to ensure the correctness of the addr
 pub unsafe fn free_shared_page(addr: usize) {
     free_shared_pages(addr, 1)
 }
+
+/// # Safety
+/// The caller needs to explicitly call the `free_private_shadow_pages` function after use
+unsafe fn alloc_private_shadow_pages(num: usize) -> Option<usize> {
+    allocator_alloc(&PRIVATE_SHADOW_ALLOCATOR, num)
+}
+
+/// # Safety
+/// The caller needs to ensure the correctness of the addr and page num
+unsafe fn free_private_shadow_pages(addr: usize, num: usize) {
+    allocator_free(&PRIVATE_SHADOW_ALLOCATOR, addr, num)
+}
+
+unsafe fn allocator_alloc(allocator: &LockedHeap, num: usize) -> Option<usize> {
+    let size = SIZE_4K.checked_mul(num)?;
+
+    let addr = allocator
+        .lock()
+        .allocate_first_fit(Layout::from_size_align(size, SIZE_4K).ok()?)
+        .map(|ptr| ptr.as_ptr() as usize)
+        .ok()?;
+
+    core::slice::from_raw_parts_mut(addr as *mut u8, size).fill(0);
+
+    Some(addr)
+}
+
+unsafe fn allocator_free(allocator: &LockedHeap, addr: usize, num: usize) {
+    let size = SIZE_4K.checked_mul(num).expect("Invalid page num");
+
+    allocator.lock().deallocate(
+        NonNull::new(addr as *mut u8).unwrap(),
+        Layout::from_size_align(size, SIZE_4K).unwrap(),
+    );
+}
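
A note on the `init_shared_memory` split above: the pool passed in is divided evenly, with the lower half backing the private shadow allocator and only the upper half decrypted for sharing with the VMM. Worked example with hypothetical values:

    // init_shared_memory(0x1000_0000, 0x10000) splits the 64 KiB pool:
    //   private shadow pool: [0x1000_0000, 0x1000_8000)  - stays private
    //   shared pool:         [0x1000_8000, 0x1001_0000)  - decrypt() shares it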
