diff --git a/openvmm/openvmm_entry/src/cli_args.rs b/openvmm/openvmm_entry/src/cli_args.rs
index 6720f94518..bb92629f4b 100644
--- a/openvmm/openvmm_entry/src/cli_args.rs
+++ b/openvmm/openvmm_entry/src/cli_args.rs
@@ -165,9 +165,11 @@ valid disk kinds:
     flags:
         `ro`                     open disk as read-only
         `vtl2`                   assign this disk to VTL2
+        `max_ioqpairs=UINT32`    maximum number of I/O queue pairs (default: 64)
+        `controller=UINT32`      controller ID (default: 0, allows multiple controllers)
 "#)]
     #[clap(long)]
-    pub nvme: Vec<DiskCli>,
+    pub nvme: Vec<NvmeCli>,
 
     /// number of sub-channels for the SCSI controller
     #[clap(long, value_name = "COUNT", default_value = "0")]
@@ -923,6 +925,66 @@ impl FromStr for DiskCli {
     }
 }
 
+// NVMe-specific disk configuration with controller options
+#[derive(Clone)]
+pub struct NvmeCli {
+    pub vtl: DeviceVtl,
+    pub kind: DiskCliKind,
+    pub read_only: bool,
+    pub underhill: Option<UnderhillDiskSource>,
+    pub max_ioqpairs: Option<u32>,
+    pub controller_id: u32,
+}
+
+impl FromStr for NvmeCli {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> anyhow::Result<Self> {
+        let mut opts = s.split(',');
+        let kind = opts.next().unwrap().parse()?;
+
+        let mut read_only = false;
+        let mut underhill = None;
+        let mut vtl = DeviceVtl::Vtl0;
+        let mut max_ioqpairs = None;
+        let mut controller_id = 0; // Default controller ID
+
+        for opt in opts {
+            let mut s = opt.split('=');
+            let opt = s.next().unwrap();
+            match opt {
+                "ro" => read_only = true,
+                "vtl2" => {
+                    vtl = DeviceVtl::Vtl2;
+                }
+                "uh-nvme" => underhill = Some(UnderhillDiskSource::Nvme),
+                "max_ioqpairs" => {
+                    let val = s.next().ok_or_else(|| anyhow::anyhow!("max_ioqpairs requires a value"))?;
+                    max_ioqpairs = Some(val.parse().map_err(|_| anyhow::anyhow!("invalid max_ioqpairs value: {}", val))?);
+                }
+                "controller" => {
+                    let val = s.next().ok_or_else(|| anyhow::anyhow!("controller requires a value"))?;
+                    controller_id = val.parse().map_err(|_| anyhow::anyhow!("invalid controller value: {}", val))?;
+                }
+                opt => anyhow::bail!("unknown option: '{opt}'"),
+            }
+        }
+
+        if underhill.is_some() && vtl != DeviceVtl::Vtl0 {
+            anyhow::bail!("`uh-nvme` is incompatible with `vtl2`");
+        }
+
+        Ok(NvmeCli {
+            vtl,
+            kind,
+            read_only,
+            underhill,
+            max_ioqpairs,
+            controller_id,
+        })
+    }
+}
+
 // [,ro,s]
 #[derive(Clone)]
 pub struct IdeDiskCli {
@@ -1837,4 +1899,44 @@ mod tests {
         assert!(FloppyDiskCli::from_str("").is_err());
         assert!(FloppyDiskCli::from_str("file:/path/to/floppy.img,invalid").is_err());
     }
+
+    #[test]
+    fn test_nvme_cli_from_str() {
+        use hvlite_defs::config::DeviceVtl;
+
+        // Test basic nvme disk without options
+        let nvme = NvmeCli::from_str("file:/path/to/disk.vhd").unwrap();
+        assert_eq!(nvme.max_ioqpairs, None);
+        assert!(!nvme.read_only);
+        assert_eq!(nvme.vtl, DeviceVtl::Vtl0);
+        assert_eq!(nvme.controller_id, 0);
+
+        // Test with max_ioqpairs
+        let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs=128").unwrap();
+        assert_eq!(nvme.max_ioqpairs, Some(128));
+        assert_eq!(nvme.controller_id, 0);
+
+        // Test with controller ID
+        let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,controller=1").unwrap();
+        assert_eq!(nvme.controller_id, 1);
+        assert_eq!(nvme.max_ioqpairs, None);
+
+        // Test with multiple options including max_ioqpairs and controller
+        let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,ro,max_ioqpairs=256,controller=2").unwrap();
+        assert_eq!(nvme.max_ioqpairs, Some(256));
+        assert!(nvme.read_only);
+        assert_eq!(nvme.controller_id, 2);
+
+        // Test with vtl2, max_ioqpairs, and controller
+        let nvme = NvmeCli::from_str("file:/path/to/disk.vhd,vtl2,max_ioqpairs=64,controller=0").unwrap();
+        assert_eq!(nvme.max_ioqpairs, Some(64));
+        assert_eq!(nvme.vtl, DeviceVtl::Vtl2);
+        assert_eq!(nvme.controller_id, 0);
+
+        // Test error cases
+        assert!(NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs").is_err()); // missing value
+        assert!(NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs=abc").is_err()); // invalid value
+        assert!(NvmeCli::from_str("file:/path/to/disk.vhd,controller").is_err()); // missing value
+        assert!(NvmeCli::from_str("file:/path/to/disk.vhd,controller=abc").is_err()); // invalid value
+    }
 }
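(Illustrative usage, not part of the diff: with these options parsed by `NvmeCli::from_str`, an invocation along the lines of

    --nvme file:/path/a.vhd,max_ioqpairs=128 --nvme file:/path/b.vhd,max_ioqpairs=128,controller=1

would request 128 I/O queue pairs for the VTL0 NVMe controller and tag the second disk with controller ID 1. The paths are placeholders; the `file:` disk-kind syntax and flag names follow the help text and tests above.)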
NvmeCli::from_str("file:/path/to/disk.vhd,vtl2,max_ioqpairs=64,controller=0").unwrap(); + assert_eq!(nvme.max_ioqpairs, Some(64)); + assert_eq!(nvme.vtl, DeviceVtl::Vtl2); + assert_eq!(nvme.controller_id, 0); + + // Test error cases + assert!(NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs").is_err()); // missing value + assert!(NvmeCli::from_str("file:/path/to/disk.vhd,max_ioqpairs=abc").is_err()); // invalid value + assert!(NvmeCli::from_str("file:/path/to/disk.vhd,controller").is_err()); // missing value + assert!(NvmeCli::from_str("file:/path/to/disk.vhd,controller=abc").is_err()); // invalid value + } } diff --git a/openvmm/openvmm_entry/src/lib.rs b/openvmm/openvmm_entry/src/lib.rs index ac823d160a..9985613177 100644 --- a/openvmm/openvmm_entry/src/lib.rs +++ b/openvmm/openvmm_entry/src/lib.rs @@ -531,21 +531,23 @@ fn vm_config_from_command_line( )?; } - for &cli_args::DiskCli { + for &cli_args::NvmeCli { vtl, ref kind, read_only, - is_dvd, underhill, + max_ioqpairs, + controller_id, } in &opt.nvme { - storage.add( + storage.add_nvme( vtl, underhill, storage_builder::DiskLocation::Nvme(None), kind, - is_dvd, read_only, + max_ioqpairs, + controller_id, )?; } diff --git a/openvmm/openvmm_entry/src/storage_builder.rs b/openvmm/openvmm_entry/src/storage_builder.rs index 130ea03e6f..3cdea8e003 100644 --- a/openvmm/openvmm_entry/src/storage_builder.rs +++ b/openvmm/openvmm_entry/src/storage_builder.rs @@ -28,12 +28,22 @@ use vtl2_settings_proto::Lun; use vtl2_settings_proto::StorageController; use vtl2_settings_proto::storage_controller; +use std::collections::HashMap; + +#[derive(Clone, Debug)] +pub struct NvmeControllerConfig { + pub namespaces: Vec, + pub max_ioqpairs: u16, + pub instance_id: Guid, +} + pub(super) struct StorageBuilder { vtl0_ide_disks: Vec, vtl0_scsi_devices: Vec, vtl2_scsi_devices: Vec, - vtl0_nvme_namespaces: Vec, - vtl2_nvme_namespaces: Vec, + // Map from controller_id to controller config + vtl0_nvme_controllers: HashMap, + vtl2_nvme_controllers: HashMap, underhill_scsi_luns: Vec, underhill_nvme_luns: Vec, openhcl_vtl: Option, @@ -70,8 +80,8 @@ impl StorageBuilder { vtl0_ide_disks: Vec::new(), vtl0_scsi_devices: Vec::new(), vtl2_scsi_devices: Vec::new(), - vtl0_nvme_namespaces: Vec::new(), - vtl2_nvme_namespaces: Vec::new(), + vtl0_nvme_controllers: HashMap::new(), + vtl2_nvme_controllers: HashMap::new(), underhill_scsi_luns: Vec::new(), underhill_nvme_luns: Vec::new(), openhcl_vtl, @@ -82,6 +92,10 @@ impl StorageBuilder { !self.vtl0_nvme_namespaces.is_empty() || !self.underhill_nvme_luns.is_empty() } + pub fn has_vtl0_nvme(&self) -> bool { + !self.vtl0_nvme_controllers.is_empty() || !self.underhill_nvme_luns.is_empty() + } + pub fn add( &mut self, vtl: DeviceVtl, @@ -102,6 +116,46 @@ impl StorageBuilder { Ok(()) } + pub fn add_nvme( + &mut self, + vtl: DeviceVtl, + underhill: Option, + target: DiskLocation, + kind: &DiskCliKind, + read_only: bool, + max_ioqpairs: Option, + ) -> anyhow::Result<()> { + // Validate and set max_ioqpairs for this VTL + if let Some(max_ioqpairs) = max_ioqpairs { + let max_ioqpairs = max_ioqpairs.min(u16::MAX as u32) as u16; + let current_max = match vtl { + DeviceVtl::Vtl0 => &mut self.vtl0_nvme_max_ioqpairs, + DeviceVtl::Vtl2 => &mut self.vtl2_nvme_max_ioqpairs, + DeviceVtl::Vtl1 => anyhow::bail!("vtl1 unsupported"), + }; + + match current_max { + None => *current_max = Some(max_ioqpairs), + Some(existing) if *existing != max_ioqpairs => { + anyhow::bail!("conflicting max_ioqpairs values for VTL{}: {} vs {}", + match vtl { 
diff --git a/openvmm/openvmm_entry/src/storage_builder.rs b/openvmm/openvmm_entry/src/storage_builder.rs
index 130ea03e6f..3cdea8e003 100644
--- a/openvmm/openvmm_entry/src/storage_builder.rs
+++ b/openvmm/openvmm_entry/src/storage_builder.rs
@@ -28,12 +28,22 @@ use vtl2_settings_proto::Lun;
 use vtl2_settings_proto::StorageController;
 use vtl2_settings_proto::storage_controller;
 
 pub(super) struct StorageBuilder {
     vtl0_ide_disks: Vec<IdeDeviceConfig>,
     vtl0_scsi_devices: Vec<ScsiDeviceAndPath>,
     vtl2_scsi_devices: Vec<ScsiDeviceAndPath>,
     vtl0_nvme_namespaces: Vec<NamespaceDefinition>,
     vtl2_nvme_namespaces: Vec<NamespaceDefinition>,
+    // Per-VTL override for the NVMe controller's I/O queue pair (and MSI-X) count.
+    vtl0_nvme_max_ioqpairs: Option<u16>,
+    vtl2_nvme_max_ioqpairs: Option<u16>,
     underhill_scsi_luns: Vec<Lun>,
     underhill_nvme_luns: Vec<Lun>,
     openhcl_vtl: Option<DeviceVtl>,
@@ -70,8 +80,8 @@
             vtl0_ide_disks: Vec::new(),
             vtl0_scsi_devices: Vec::new(),
             vtl2_scsi_devices: Vec::new(),
             vtl0_nvme_namespaces: Vec::new(),
             vtl2_nvme_namespaces: Vec::new(),
+            vtl0_nvme_max_ioqpairs: None,
+            vtl2_nvme_max_ioqpairs: None,
             underhill_scsi_luns: Vec::new(),
             underhill_nvme_luns: Vec::new(),
             openhcl_vtl,
@@ -102,6 +116,46 @@
         Ok(())
     }
 
+    pub fn add_nvme(
+        &mut self,
+        vtl: DeviceVtl,
+        underhill: Option<UnderhillDiskSource>,
+        target: DiskLocation,
+        kind: &DiskCliKind,
+        read_only: bool,
+        max_ioqpairs: Option<u32>,
+        // Only a single NVMe controller per VTL is built today, so the
+        // controller ID is parsed and threaded through but not yet used to
+        // route namespaces onto separate controllers.
+        _controller_id: u32,
+    ) -> anyhow::Result<()> {
+        // Validate and record max_ioqpairs for this VTL; all NVMe disks on
+        // the same VTL must agree on the value.
+        if let Some(max_ioqpairs) = max_ioqpairs {
+            let max_ioqpairs = max_ioqpairs.min(u16::MAX as u32) as u16;
+            let current_max = match vtl {
+                DeviceVtl::Vtl0 => &mut self.vtl0_nvme_max_ioqpairs,
+                DeviceVtl::Vtl2 => &mut self.vtl2_nvme_max_ioqpairs,
+                DeviceVtl::Vtl1 => anyhow::bail!("vtl1 unsupported"),
+            };
+
+            match current_max {
+                None => *current_max = Some(max_ioqpairs),
+                Some(existing) if *existing != max_ioqpairs => {
+                    anyhow::bail!("conflicting max_ioqpairs values for VTL{}: {} vs {}",
+                        match vtl { DeviceVtl::Vtl0 => 0, DeviceVtl::Vtl2 => 2, _ => unreachable!() },
+                        existing, max_ioqpairs);
+                }
+                _ => {} // Same value, no conflict
+            }
+        }
+
+        if let Some(source) = underhill {
+            if vtl != DeviceVtl::Vtl0 {
+                anyhow::bail!("underhill can only offer devices to vtl0");
+            }
+            self.add_underhill(source.into(), target, kind, false, read_only)?;
+        } else {
+            self.add_inner(vtl, target, kind, false, read_only)?;
+        }
+        Ok(())
+    }
+
     /// Returns the "sub device path" for assigning this into Underhill, or
     /// `None` if Underhill can't use this device as a source.
     fn add_inner(
@@ -335,8 +389,8 @@ impl StorageBuilder {
                 resource: NvmeControllerHandle {
                     subsystem_id: NVME_VTL0_INSTANCE_ID,
                     namespaces: std::mem::take(&mut self.vtl0_nvme_namespaces),
-                    max_io_queues: 64,
-                    msix_count: 64,
+                    max_io_queues: self.vtl0_nvme_max_ioqpairs.unwrap_or(64),
+                    msix_count: self.vtl0_nvme_max_ioqpairs.unwrap_or(64),
                 }
                 .into_resource(),
             });
@@ -367,8 +421,8 @@
                 resource: NvmeControllerHandle {
                     subsystem_id: NVME_VTL2_INSTANCE_ID,
                     namespaces: std::mem::take(&mut self.vtl2_nvme_namespaces),
-                    max_io_queues: 64,
-                    msix_count: 64,
+                    max_io_queues: self.vtl2_nvme_max_ioqpairs.unwrap_or(64),
+                    msix_count: self.vtl2_nvme_max_ioqpairs.unwrap_or(64),
                 }
                 .into_resource(),
             });
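For reference, a minimal standalone sketch (not part of the change) of the defaulting and clamping behavior implied by `add_nvme` and the controller construction above: a requested `max_ioqpairs` above `u16::MAX` is clamped rather than rejected, and the controller keeps the previous default of 64 queue pairs when no disk supplies the flag.

    // Mirrors how StorageBuilder::add_nvme and NvmeControllerHandle construction
    // above turn the optional per-disk max_ioqpairs value into a u16.
    fn effective_ioqpairs(requested: Option<u32>) -> u16 {
        // Clamp to the u16 range expected by max_io_queues/msix_count; default to 64.
        requested.map_or(64, |v| v.min(u16::MAX as u32) as u16)
    }

    fn main() {
        assert_eq!(effective_ioqpairs(None), 64);
        assert_eq!(effective_ioqpairs(Some(128)), 128);
        assert_eq!(effective_ioqpairs(Some(1 << 20)), u16::MAX); // clamped, not an error
    }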