Cargo.lock (2 changes: 2 additions & 0 deletions)
@@ -4815,6 +4815,7 @@ dependencies = [
  "inspect",
  "mesh",
  "open_enum",
+ "static_assertions",
  "storage_string",
  "zerocopy 0.8.25",
 ]
@@ -4994,6 +4995,7 @@ dependencies = [
  "memory_range",
  "mesh",
  "page_pool_alloc",
+ "tracing",
  "user_driver",
  "virt",
  "vmcore",
openhcl/openhcl_boot/src/cmdline.rs (10 changes: 10 additions & 0 deletions)
@@ -26,12 +26,16 @@ const ENABLE_VTL2_GPA_POOL: &str = "OPENHCL_ENABLE_VTL2_GPA_POOL=";
 /// * `log`: Enable sidecar logging.
 const SIDECAR: &str = "OPENHCL_SIDECAR=";
 
+/// Disable NVMe keep alive regardless of whether the host supports it.
+const DISABLE_NVME_KEEP_ALIVE: &str = "OPENHCL_DISABLE_NVME_KEEP_ALIVE=";
+
 #[derive(Debug, PartialEq)]
 pub struct BootCommandLineOptions {
     pub confidential_debug: bool,
     pub enable_vtl2_gpa_pool: Option<u64>,
     pub sidecar: bool,
     pub sidecar_logging: bool,
+    pub disable_nvme_keep_alive: bool,
 }
 
 impl BootCommandLineOptions {
@@ -41,6 +45,7 @@ impl BootCommandLineOptions {
             enable_vtl2_gpa_pool: None,
             sidecar: true, // sidecar is enabled by default
             sidecar_logging: false,
+            disable_nvme_keep_alive: false,
         }
     }
 }
@@ -72,6 +77,11 @@ impl BootCommandLineOptions {
                         }
                     }
                 }
+            } else if arg.starts_with(DISABLE_NVME_KEEP_ALIVE) {
+                let arg = arg.split_once('=').map(|(_, arg)| arg);
+                if arg.is_some_and(|a| a != "0") {
+                    self.disable_nvme_keep_alive = true;
+                }
             }
         }
     }
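
The semantics of the new branch are worth noting: any value after the `=` other than "0", including an empty string, disables keep alive. A minimal test sketch of that behavior (hypothetical test, assuming `parse` takes `&mut self` and a `&str` as the diff suggests):

#[test]
fn parse_disable_nvme_keep_alive() {
    // "=1" (or any non-"0" value) disables keep alive.
    let mut opts = BootCommandLineOptions::new();
    opts.parse("OPENHCL_DISABLE_NVME_KEEP_ALIVE=1");
    assert!(opts.disable_nvme_keep_alive);

    // "=0" leaves keep alive enabled.
    let mut opts = BootCommandLineOptions::new();
    opts.parse("OPENHCL_DISABLE_NVME_KEEP_ALIVE=0");
    assert!(!opts.disable_nvme_keep_alive);

    // An empty value still counts as "not 0" and disables keep alive.
    let mut opts = BootCommandLineOptions::new();
    opts.parse("OPENHCL_DISABLE_NVME_KEEP_ALIVE=");
    assert!(opts.disable_nvme_keep_alive);
}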
openhcl/openhcl_boot/src/main.rs (43 changes: 30 additions & 13 deletions)
@@ -76,16 +76,32 @@ impl From<core::fmt::Error> for CommandLineTooLong {
     }
 }
 
-/// Read and setup the underhill kernel command line into the specified buffer.
-fn build_kernel_command_line(
-    params: &ShimParams,
-    cmdline: &mut ArrayString<COMMAND_LINE_SIZE>,
-    partition_info: &PartitionInfo,
+struct BuildKernelCommandLineParams<'a> {
+    params: &'a ShimParams,
+    cmdline: &'a mut ArrayString<COMMAND_LINE_SIZE>,
+    partition_info: &'a PartitionInfo,
     can_trust_host: bool,
     is_confidential_debug: bool,
-    sidecar: Option<&SidecarConfig<'_>>,
+    sidecar: Option<&'a SidecarConfig<'a>>,
     vtl2_pool_supported: bool,
+    disable_keep_alive: bool,
+}
+
+/// Read and setup the underhill kernel command line into the specified buffer.
+fn build_kernel_command_line(
+    fn_params: BuildKernelCommandLineParams<'_>,
 ) -> Result<(), CommandLineTooLong> {
+    let BuildKernelCommandLineParams {
+        params,
+        cmdline,
+        partition_info,
+        can_trust_host,
+        is_confidential_debug,
+        sidecar,
+        vtl2_pool_supported,
+        disable_keep_alive,
+    } = fn_params;
+
     // For reference:
     // https://www.kernel.org/doc/html/v5.15/admin-guide/kernel-parameters.html
     const KERNEL_PARAMETERS: &[&str] = &[
@@ -275,7 +291,7 @@ fn build_kernel_command_line(
 
     // Only when explicitly supported by Host.
     // TODO: Move from command line to device tree when stabilized.
-    if partition_info.nvme_keepalive && vtl2_pool_supported {
+    if partition_info.nvme_keepalive && vtl2_pool_supported && !disable_keep_alive {
         write!(cmdline, "OPENHCL_NVME_KEEP_ALIVE=1 ")?;
     }
 
@@ -633,15 +649,16 @@ fn shim_main(shim_params_raw_offset: isize) -> ! {
     let address_space: &AddressSpaceManager = address_space;
 
     let mut cmdline = off_stack!(ArrayString<COMMAND_LINE_SIZE>, ArrayString::new_const());
-    build_kernel_command_line(
-        &p,
-        &mut cmdline,
+    build_kernel_command_line(BuildKernelCommandLineParams {
+        params: &p,
+        cmdline: &mut cmdline,
         partition_info,
         can_trust_host,
         is_confidential_debug,
-        sidecar.as_ref(),
-        address_space.has_vtl2_pool(),
-    )
+        sidecar: sidecar.as_ref(),
+        vtl2_pool_supported: address_space.has_vtl2_pool(),
+        disable_keep_alive: partition_info.boot_options.disable_nvme_keep_alive,
+    })
     .unwrap();
 
     let mut fdt = off_stack!(Fdt, zeroed());
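
The refactor from seven positional arguments to a params struct makes the call site self-documenting, and the exhaustive destructuring at the top of the function turns an added-but-unused field into a compile error rather than a silent bug. A standalone sketch of the pattern (names hypothetical, not from this PR):

struct GreetParams<'a> {
    name: &'a str,
    excited: bool,
}

fn greet(p: GreetParams<'_>) -> String {
    // Destructure so that any newly added field must be handled
    // (or explicitly ignored) here.
    let GreetParams { name, excited } = p;
    format!("hello, {name}{}", if excited { "!" } else { "" })
}

// Call sites name every argument, so reordering fields is harmless:
// greet(GreetParams { name: "world", excited: true });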
openhcl/openhcl_dma_manager/Cargo.toml (1 change: 1 addition & 0 deletions)
@@ -21,6 +21,7 @@ virt.workspace = true
 vmcore.workspace = true
 
 anyhow.workspace = true
+tracing.workspace = true
 
 [lints]
 workspace = true
openhcl/openhcl_dma_manager/src/lib.rs (34 changes: 31 additions & 3 deletions)
@@ -286,12 +286,20 @@ impl DmaManagerInner {
                 allocation_visibility: AllocationVisibility::Private,
                 persistent_allocations: false,
                 shared_spawner: _,
-                private_spawner: _,
+                private_spawner,
             } => match lower_vtl_policy {
                 LowerVtlPermissionPolicy::Any => {
                     // No persistence needed means the `LockedMemorySpawner`
                     // using normal VTL2 ram is fine.
-                    DmaClientBacking::LockedMemory(LockedMemorySpawner)
+                    match private_spawner {
+                        Some(private) => DmaClientBacking::PrivatePoolWithFallback((
+                            private
+                                .allocator(device_name.into())
+                                .context("failed to create private allocator")?,
+                            LockedMemorySpawner,
+                        )),
+                        None => DmaClientBacking::LockedMemory(LockedMemorySpawner),
+                    }
                 }
                 LowerVtlPermissionPolicy::Vtl0 => {
                     // `LockedMemorySpawner` uses private VTL2 ram, so
@@ -416,6 +424,7 @@ enum DmaClientBacking {
     SharedPool(#[inspect(skip)] PagePoolAllocator),
     PrivatePool(#[inspect(skip)] PagePoolAllocator),
     LockedMemory(#[inspect(skip)] LockedMemorySpawner),
+    PrivatePoolWithFallback(#[inspect(skip)] (PagePoolAllocator, LockedMemorySpawner)),
     PrivatePoolLowerVtl(#[inspect(skip)] LowerVtlMemorySpawner<PagePoolAllocator>),
     LockedMemoryLowerVtl(#[inspect(skip)] LowerVtlMemorySpawner<LockedMemorySpawner>),
 }
@@ -429,6 +438,16 @@ impl DmaClientBacking {
             DmaClientBacking::SharedPool(allocator) => allocator.allocate_dma_buffer(total_size),
             DmaClientBacking::PrivatePool(allocator) => allocator.allocate_dma_buffer(total_size),
             DmaClientBacking::LockedMemory(spawner) => spawner.allocate_dma_buffer(total_size),
+            DmaClientBacking::PrivatePoolWithFallback((allocator, spawner)) => {
+                allocator.allocate_dma_buffer(total_size).or_else(|err| {
+                    tracing::warn!(
+                        size = total_size,
+                        error = ?err,
+                        "falling back to locked memory for dma allocation"
+                    );
+                    spawner.allocate_dma_buffer(total_size)
+                })
+            }
             DmaClientBacking::PrivatePoolLowerVtl(spawner) => {
                 spawner.allocate_dma_buffer(total_size)
             }
@@ -442,7 +461,16 @@ impl DmaClientBacking {
         match self {
             DmaClientBacking::SharedPool(allocator) => allocator.attach_pending_buffers(),
             DmaClientBacking::PrivatePool(allocator) => allocator.attach_pending_buffers(),
-            DmaClientBacking::LockedMemory(spawner) => spawner.attach_pending_buffers(),
+            DmaClientBacking::PrivatePoolWithFallback(_) => {
+                anyhow::bail!("cannot attach pending buffers with fallback allocator")
+            }
+            DmaClientBacking::LockedMemory(_) => {
+                anyhow::bail!(
+                    "attaching pending buffers is not supported with locked memory; \
+                     this client type does not maintain a pool of pending allocations. \
+                     To use attach_pending_buffers, create a client backed by a shared or private pool."
+                )
+            }
             DmaClientBacking::PrivatePoolLowerVtl(spawner) => spawner.attach_pending_buffers(),
             DmaClientBacking::LockedMemoryLowerVtl(spawner) => spawner.attach_pending_buffers(),
         }
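
The fallback backing is plain `Result::or_else`: try the private pool first, log a warning, then fall back to locked memory. Because buffers handed out by the fallback spawner are not persistent, `attach_pending_buffers` bails for this backing instead of restoring only a subset of allocations. A freestanding sketch of the same control flow (helper names hypothetical):

fn allocate_with_fallback(
    primary: impl Fn(usize) -> anyhow::Result<Vec<u8>>,
    fallback: impl Fn(usize) -> anyhow::Result<Vec<u8>>,
    size: usize,
) -> anyhow::Result<Vec<u8>> {
    primary(size).or_else(|err| {
        // Log and fall back; the caller cannot tell which pool served it.
        eprintln!("primary pool failed ({err:#}); falling back");
        fallback(size)
    })
}

fn main() -> anyhow::Result<()> {
    let buf = allocate_with_fallback(
        |_| Err(anyhow::anyhow!("private pool exhausted")),
        |size| Ok(vec![0u8; size]),
        4096,
    )?;
    assert_eq!(buf.len(), 4096);
    Ok(())
}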
vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs (15 changes: 14 additions & 1 deletion)
@@ -302,6 +302,9 @@ impl<T: DeviceBacking> NvmeDriver<T> {
         )
         .context("failed to create admin queue pair")?;
 
+        let admin_sqes = admin.sq_entries();
+        let admin_cqes = admin.cq_entries();
+
         let admin = worker.admin.insert(admin);
 
         // Register the admin queue with the controller.
@@ -429,6 +432,13 @@ impl<T: DeviceBacking> NvmeDriver<T> {
             let io_cqsize = (QueuePair::MAX_CQ_ENTRIES - 1).min(worker.registers.cap.mqes_z()) + 1;
             let io_sqsize = (QueuePair::MAX_SQ_ENTRIES - 1).min(worker.registers.cap.mqes_z()) + 1;
 
+            tracing::debug!(
+                io_cqsize,
+                io_sqsize,
+                hw_size = worker.registers.cap.mqes_z(),
+                "io queue sizes"
+            );
+
             // Some hardware (such as ASAP) require that the sq and cq have the same size.
             io_cqsize.min(io_sqsize)
         };
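
The clamp arithmetic is easier to read knowing that CAP.MQES is zero-based in the NVMe spec (a raw value of 1023 means 1024 entries), which the `_z` accessor suffix appears to reflect: convert the driver maximum to zero-based, take the minimum, then convert back. A worked sketch under that assumption, with `MAX_CQ_ENTRIES`/`MAX_SQ_ENTRIES` taken to be one-based entry counts:

// CAP.MQES is zero-based: a raw value of 1023 means 1024 entries.
fn clamp_queue_size(driver_max_entries: u32, mqes_zero_based: u32) -> u32 {
    (driver_max_entries - 1).min(mqes_zero_based) + 1
}

fn main() {
    // Hardware allows 1024 entries (MQES = 1023); driver max is 4096.
    assert_eq!(clamp_queue_size(4096, 1023), 1024);
    // Hardware allows 65536 entries; the driver cap wins.
    assert_eq!(clamp_queue_size(4096, 65535), 4096);
}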
@@ -607,7 +617,7 @@ impl<T: DeviceBacking> NvmeDriver<T> {
             admin: None, // Updated below.
             identify: Some(Arc::new(
                 spec::IdentifyController::read_from_bytes(saved_state.identify_ctrl.as_bytes())
-                    .map_err(|_| RestoreError::InvalidData)?, // TODO: zerocopy: map_err (https://github.com/microsoft/openvmm/issues/759)
+                    .map_err(|_| RestoreError::InvalidData)?,
             )),
             driver: driver.clone(),
             io_issuers,
@@ -916,6 +926,9 @@ impl<T: DeviceBacking> DriverWorkerTask<T> {
         )
         .with_context(|| format!("failed to create io queue pair {qid}"))?;
 
+        assert_eq!(queue.sq_entries(), queue.cq_entries());
+        state.qsize = queue.sq_entries();
+
         let io_sq_addr = queue.sq_addr();
         let io_cq_addr = queue.cq_addr();
 