104 changes: 103 additions & 1 deletion openvmm/openvmm_entry/src/cli_args.rs
@@ -165,9 +165,11 @@ valid disk kinds:
flags:
`ro` open disk as read-only
`vtl2` assign this disk to VTL2
`max_ioqpairs=UINT32` maximum number of I/O queue pairs (default: 64)
`controller=UINT32` ID of the controller to attach this disk to (default: 0; distinct IDs allow multiple controllers)
"#)]
#[clap(long)]
pub nvme: Vec<DiskCli>,
pub nvme: Vec<NvmeCli>,

/// number of sub-channels for the SCSI controller
#[clap(long, value_name = "COUNT", default_value = "0")]
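For illustration, the new options combine with the existing `--nvme` disk syntax roughly as follows (hypothetical paths; `openvmm` stands in for the built binary):

    openvmm --nvme file:/tmp/a.vhd,max_ioqpairs=128
    openvmm --nvme file:/tmp/a.vhd,controller=0 --nvme file:/tmp/b.vhd,controller=1
    openvmm --nvme file:/tmp/c.vhd,vtl2,ro,max_ioqpairs=64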
@@ -923,6 +925,66 @@ impl FromStr for DiskCli {
}
}

// <kind>[,ro,vtl2,uh-nvme,max_ioqpairs=UINT32,controller=UINT32]
//
// NVMe-specific disk configuration with controller options.
#[derive(Clone)]
pub struct NvmeCli {
pub vtl: DeviceVtl,
pub kind: DiskCliKind,
pub read_only: bool,
pub underhill: Option<UnderhillDiskSource>,
pub max_ioqpairs: Option<u32>,
pub controller_id: u32,
}

impl FromStr for NvmeCli {
type Err = anyhow::Error;

fn from_str(s: &str) -> anyhow::Result<Self> {
let mut opts = s.split(',');
let kind = opts.next().unwrap().parse()?;

let mut read_only = false;
let mut underhill = None;
let mut vtl = DeviceVtl::Vtl0;
let mut max_ioqpairs = None;
let mut controller_id = 0; // Default controller ID

for opt in opts {
let mut s = opt.split('=');
let opt = s.next().unwrap();
match opt {
"ro" => read_only = true,
"vtl2" => {
vtl = DeviceVtl::Vtl2;
}
"uh-nvme" => underhill = Some(UnderhillDiskSource::Nvme),
"max_ioqpairs" => {
let val = s.next().ok_or_else(|| anyhow::anyhow!("max_ioqpairs requires a value"))?;
max_ioqpairs = Some(val.parse().map_err(|_| anyhow::anyhow!("invalid max_ioqpairs value: {}", val))?);
}
"controller" => {
let val = s.next().ok_or_else(|| anyhow::anyhow!("controller requires a value"))?;
controller_id = val.parse().map_err(|_| anyhow::anyhow!("invalid controller value: {}", val))?;
}
opt => anyhow::bail!("unknown option: '{opt}'"),
}
}

if underhill.is_some() && vtl != DeviceVtl::Vtl0 {
anyhow::bail!("`uh-nvme` is incompatible with `vtl2`");
}

Ok(NvmeCli {
vtl,
kind,
read_only,
underhill,
max_ioqpairs,
controller_id,
})
}
}

// <kind>[,ro,s]
#[derive(Clone)]
pub struct IdeDiskCli {
@@ -1837,4 +1899,44 @@ mod tests {
assert!(FloppyDiskCli::from_str("").is_err());
assert!(FloppyDiskCli::from_str("file:/path/to/floppy.img,invalid").is_err());
}

#[test]
fn test_nvme_cli_from_str() {
use hvlite_defs::config::DeviceVtl;

// Test basic nvme disk without options
let nvme = NvmeCli::from_str("file:/path/to/disk.vhd").unwrap();
assert_eq!(nvme.max_ioqpairs, None);
assert!(!nvme.read_only);
assert_eq!(nvme.vtl, DeviceVtl::Vtl0);
assert_eq!(nvme.controller_id, 0);

// Test with max_ioqpairs
let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs=128").unwrap();
assert_eq!(nvme.max_ioqpairs, Some(128));
assert_eq!(nvme.controller_id, 0);

// Test with controller ID
let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,controller=1").unwrap();
assert_eq!(nvme.controller_id, 1);
assert_eq!(nvme.max_ioqpairs, None);

// Test with multiple options including max_ioqpairs and controller
let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,ro,max_ioqpairs=256,controller=2").unwrap();
assert_eq!(nvme.max_ioqpairs, Some(256));
assert!(nvme.read_only);
assert_eq!(nvme.controller_id, 2);

// Test with vtl2, max_ioqpairs, and controller
let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,vtl2,max_ioqpairs=64,controller=0").unwrap();
assert_eq!(nvme.max_ioqpairs, Some(64));
assert_eq!(nvme.vtl, DeviceVtl::Vtl2);
assert_eq!(nvme.controller_id, 0);

// Test error cases
assert!(NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs").is_err()); // missing value
assert!(NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs=abc").is_err()); // invalid value
assert!(NvmeCli::from_str("file:/path/to/disk.vhd,controller").is_err()); // missing value
assert!(NvmeCli::from_str("file:/path/to/disk.vhd,controller=abc").is_err()); // invalid value
}
}
10 changes: 6 additions & 4 deletions openvmm/openvmm_entry/src/lib.rs
@@ -531,21 +531,23 @@ fn vm_config_from_command_line(
)?;
}

for &cli_args::DiskCli {
for &cli_args::NvmeCli {
vtl,
ref kind,
read_only,
is_dvd,
underhill,
max_ioqpairs,
controller_id,
} in &opt.nvme
{
storage.add(
storage.add_nvme(
vtl,
underhill,
storage_builder::DiskLocation::Nvme(None),
kind,
is_dvd,
read_only,
max_ioqpairs,
controller_id,
)?;
}

70 changes: 62 additions & 8 deletions openvmm/openvmm_entry/src/storage_builder.rs
@@ -28,12 +28,22 @@ use vtl2_settings_proto::Lun;
use vtl2_settings_proto::StorageController;
use vtl2_settings_proto::storage_controller;

use std::collections::HashMap;

// Per-controller NVMe configuration, keyed by the CLI `controller` ID.
#[derive(Clone, Debug)]
pub struct NvmeControllerConfig {
pub namespaces: Vec<NamespaceDefinition>,
pub max_ioqpairs: u16,
pub instance_id: Guid,
}
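As a minimal, self-contained sketch of how namespaces could be grouped by `controller` ID into entries of this shape (toy types and data; the real code would use `NamespaceDefinition` and a per-controller `Guid`):

    use std::collections::HashMap;

    // Toy stand-in for NvmeControllerConfig: one entry per controller ID.
    #[derive(Debug, Default)]
    struct ControllerEntry {
        namespaces: Vec<String>,
        max_ioqpairs: u16,
    }

    fn main() {
        // (path, controller ID, max_ioqpairs), as the CLI would supply them.
        let disks = [("a.vhd", 0u32, 64u16), ("b.vhd", 0, 64), ("c.vhd", 1, 128)];
        let mut controllers: HashMap<u32, ControllerEntry> = HashMap::new();
        for (path, controller_id, max_ioqpairs) in disks {
            let entry = controllers.entry(controller_id).or_default();
            entry.namespaces.push(path.to_string());
            entry.max_ioqpairs = max_ioqpairs;
        }
        // Controller 0 ends up with ["a.vhd", "b.vhd"]; controller 1 with ["c.vhd"].
        for (id, entry) in &controllers {
            println!("controller {id}: {:?} max_ioqpairs={}", entry.namespaces, entry.max_ioqpairs);
        }
    }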

pub(super) struct StorageBuilder {
vtl0_ide_disks: Vec<IdeDeviceConfig>,
vtl0_scsi_devices: Vec<ScsiDeviceAndPath>,
vtl2_scsi_devices: Vec<ScsiDeviceAndPath>,
vtl0_nvme_namespaces: Vec<NamespaceDefinition>,
vtl2_nvme_namespaces: Vec<NamespaceDefinition>,
// Per-VTL cap on NVMe I/O queue pairs (`max_ioqpairs=` on the CLI)
vtl0_nvme_max_ioqpairs: Option<u16>,
vtl2_nvme_max_ioqpairs: Option<u16>,
// Map from controller_id to controller config
vtl0_nvme_controllers: HashMap<u32, NvmeControllerConfig>,
vtl2_nvme_controllers: HashMap<u32, NvmeControllerConfig>,
underhill_scsi_luns: Vec<Lun>,
underhill_nvme_luns: Vec<Lun>,
openhcl_vtl: Option<DeviceVtl>,
@@ -70,8 +80,8 @@ impl StorageBuilder {
vtl0_ide_disks: Vec::new(),
vtl0_scsi_devices: Vec::new(),
vtl2_scsi_devices: Vec::new(),
vtl0_nvme_namespaces: Vec::new(),
vtl2_nvme_namespaces: Vec::new(),
vtl0_nvme_max_ioqpairs: None,
vtl2_nvme_max_ioqpairs: None,
vtl0_nvme_controllers: HashMap::new(),
vtl2_nvme_controllers: HashMap::new(),
underhill_scsi_luns: Vec::new(),
underhill_nvme_luns: Vec::new(),
openhcl_vtl,
@@ -82,6 +92,10 @@ impl StorageBuilder {
!self.vtl0_nvme_namespaces.is_empty() || !self.underhill_nvme_luns.is_empty()
}

pub fn has_vtl0_nvme(&self) -> bool {
!self.vtl0_nvme_controllers.is_empty() || !self.underhill_nvme_luns.is_empty()
}

pub fn add(
&mut self,
vtl: DeviceVtl,
@@ -102,6 +116,46 @@
Ok(())
}

pub fn add_nvme(
&mut self,
vtl: DeviceVtl,
underhill: Option<UnderhillDiskSource>,
target: DiskLocation,
kind: &DiskCliKind,
read_only: bool,
max_ioqpairs: Option<u32>,
// controller ID from the CLI `controller=` flag (matches the caller in lib.rs)
controller_id: u32,
) -> anyhow::Result<()> {
// Validate and set max_ioqpairs for this VTL
if let Some(max_ioqpairs) = max_ioqpairs {
let max_ioqpairs = max_ioqpairs.min(u16::MAX as u32) as u16;
let current_max = match vtl {
DeviceVtl::Vtl0 => &mut self.vtl0_nvme_max_ioqpairs,
DeviceVtl::Vtl2 => &mut self.vtl2_nvme_max_ioqpairs,
DeviceVtl::Vtl1 => anyhow::bail!("vtl1 unsupported"),
};

match current_max {
None => *current_max = Some(max_ioqpairs),
Some(existing) if *existing != max_ioqpairs => {
anyhow::bail!("conflicting max_ioqpairs values for VTL{}: {} vs {}",
match vtl { DeviceVtl::Vtl0 => 0, DeviceVtl::Vtl2 => 2, _ => unreachable!() },
existing, max_ioqpairs);
}
_ => {} // Same value, no conflict
}
}

if let Some(source) = underhill {
if vtl != DeviceVtl::Vtl0 {
anyhow::bail!("underhill can only offer devices to vtl0");
}
self.add_underhill(source.into(), target, kind, false, read_only)?;
} else {
self.add_inner(vtl, target, kind, false, read_only)?;
}
Ok(())
}
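Worked example of the conflict check above: `--nvme file:/a.vhd,max_ioqpairs=128 --nvme file:/b.vhd,max_ioqpairs=128` is accepted (same VTL, same value), while `--nvme file:/a.vhd,max_ioqpairs=128 --nvme file:/b.vhd,max_ioqpairs=64` fails with the conflicting-values error; a VTL0 disk and a VTL2 disk may use different values, since the setting is tracked per VTL.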

/// Returns the "sub device path" for assigning this into Underhill, or
/// `None` if Underhill can't use this device as a source.
fn add_inner(
@@ -335,8 +389,8 @@ impl StorageBuilder {
resource: NvmeControllerHandle {
subsystem_id: NVME_VTL0_INSTANCE_ID,
namespaces: std::mem::take(&mut self.vtl0_nvme_namespaces),
max_io_queues: 64,
msix_count: 64,
max_io_queues: self.vtl0_nvme_max_ioqpairs.unwrap_or(64),
msix_count: self.vtl0_nvme_max_ioqpairs.unwrap_or(64),
}
.into_resource(),
});
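For example, `--nvme file:/a.vhd,max_ioqpairs=128` makes the corresponding controller advertise 128 I/O queue pairs and 128 MSI-X vectors; without the flag the previous default of 64 is kept, and values above 65535 are clamped to the u16 range in `add_nvme` before they reach these handles.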
@@ -367,8 +421,8 @@ impl StorageBuilder {
resource: NvmeControllerHandle {
subsystem_id: NVME_VTL2_INSTANCE_ID,
namespaces: std::mem::take(&mut self.vtl2_nvme_namespaces),
max_io_queues: 64,
msix_count: 64,
max_io_queues: self.vtl2_nvme_max_ioqpairs.unwrap_or(64),
msix_count: self.vtl2_nvme_max_ioqpairs.unwrap_or(64),
}
.into_resource(),
});