Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions openvmm/openvmm_entry/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1424,6 +1424,7 @@ async fn vm_config_from_command_line(
root_path: args.path.clone(),
mount_options: args.options.clone(),
},
num_request_queues: None,
}
.into_resource();
if let Some(pcie_port) = &args.pcie_port {
Expand All @@ -1442,6 +1443,7 @@ async fn vm_config_from_command_line(
fs: virtio_resources::fs::VirtioFsBackend::SectionFs {
root_path: args.path.clone(),
},
num_request_queues: None,
}
.into_resource();
if let Some(pcie_port) = &args.pcie_port {
Expand Down
1 change: 1 addition & 0 deletions openvmm/openvmm_entry/src/ttrpc/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -650,6 +650,7 @@ impl VmService {
root_path: virtiofs.root_path,
mount_options: String::new(),
},
num_request_queues: None,
}
.into_resource();
// Use VPCI when possible (currently only on Windows and macOS due
Expand Down
3 changes: 3 additions & 0 deletions vm/devices/virtio/virtio_resources/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,9 @@ pub mod fs {
pub struct VirtioFsHandle {
    /// Mount tag presented to the guest (used by the guest to identify the
    /// filesystem when mounting, e.g. `mount -t virtiofs <tag> ...`).
    pub tag: String,
    /// The host-side backend serving the filesystem requests.
    pub fs: VirtioFsBackend,
    /// Number of request queues to advertise. If `None`, defaults to the
    /// number of available CPUs (capped at 16).
    pub num_request_queues: Option<u32>,
}

#[derive(MeshPayload)]
Expand Down
44 changes: 43 additions & 1 deletion vm/devices/virtio/virtiofs/src/integration_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
//! as a guest kernel would.

use crate::VirtioFs;
use crate::virtio::MAX_REQUEST_QUEUES;
use crate::virtio::VirtioFsDevice;
use fuse::protocol::*;
use guestmem::GuestMemory;
Expand Down Expand Up @@ -71,7 +72,7 @@ impl TestHarness {

let fs = VirtioFs::new(tmpdir.path(), None).unwrap();
let driver_source = VmTaskDriverSource::new(SingleDriverBackend::new(driver.clone()));
let device = VirtioFsDevice::new(&driver_source, "testfs", fs, 0, None);
let device = VirtioFsDevice::new(&driver_source, "testfs", fs, 0, None, Some(1));

let queue_event = Event::new();
let interrupt_event = Event::new();
Expand Down Expand Up @@ -533,3 +534,44 @@ async fn init_negotiates_direct_io_allow_mmap(driver: DefaultDriver) {
"VirtioFs should request FUSE_DIRECT_IO_ALLOW_MMAP_FLAG2 when kernel advertises it"
);
}

#[async_test]
async fn num_request_queues_default_is_clamped(driver: DefaultDriver) {
    // Passing `None` lets the device derive the queue count from host
    // parallelism; whatever that is, it must land in [1, MAX_REQUEST_QUEUES].
    let dir = tempfile::tempdir().unwrap();
    let source = VmTaskDriverSource::new(SingleDriverBackend::new(driver));
    let device = VirtioFsDevice::new(
        &source,
        "testfs",
        VirtioFs::new(dir.path(), None).unwrap(),
        0,
        None,
        None,
    );

    // max_queues = num_request_queues + 1 (the extra one is the hiprio queue).
    let max_queues = device.traits().max_queues;
    assert!(max_queues >= 2, "at least 1 request queue + hiprio");
    assert!(
        max_queues <= MAX_REQUEST_QUEUES as u16 + 1,
        "at most MAX_REQUEST_QUEUES request queues + hiprio"
    );
}

#[async_test]
async fn num_request_queues_explicit_zero_is_clamped_to_one(driver: DefaultDriver) {
    // An explicit `Some(0)` is nonsensical; the device must clamp it to a
    // single request queue, giving max_queues = 1 request + 1 hiprio = 2.
    let dir = tempfile::tempdir().unwrap();
    let source = VmTaskDriverSource::new(SingleDriverBackend::new(driver));
    let device = VirtioFsDevice::new(
        &source,
        "testfs",
        VirtioFs::new(dir.path(), None).unwrap(),
        0,
        None,
        Some(0),
    );

    assert_eq!(device.traits().max_queues, 2);
}

#[async_test]
async fn num_request_queues_explicit_overflow_is_clamped(driver: DefaultDriver) {
    // A request count beyond the advertised maximum must be clamped down to
    // MAX_REQUEST_QUEUES, so max_queues = MAX_REQUEST_QUEUES + 1 (hiprio).
    let dir = tempfile::tempdir().unwrap();
    let source = VmTaskDriverSource::new(SingleDriverBackend::new(driver));
    let device = VirtioFsDevice::new(
        &source,
        "testfs",
        VirtioFs::new(dir.path(), None).unwrap(),
        0,
        None,
        Some(100),
    );

    assert_eq!(device.traits().max_queues, MAX_REQUEST_QUEUES as u16 + 1);
}
28 changes: 17 additions & 11 deletions vm/devices/virtio/virtiofs/src/resolver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,24 +35,30 @@ impl ResolveResource<VirtioDeviceHandle, VirtioFsHandle> for VirtioFsResolver {
VirtioFsBackend::HostFs {
root_path,
mount_options,
} => VirtioFsDevice::new(
input.driver_source,
&resource.tag,
VirtioFs::new(
root_path,
Some(&LxVolumeOptions::from_option_string(mount_options)),
)?,
0,
None,
),
} => {
let notify_corruption = None;
VirtioFsDevice::new(
input.driver_source,
&resource.tag,
VirtioFs::new(
root_path,
Some(&LxVolumeOptions::from_option_string(mount_options)),
)?,
0,
notify_corruption,
resource.num_request_queues,
)
}
#[cfg(windows)]
VirtioFsBackend::SectionFs { root_path } => {
let notify_corruption = None;
VirtioFsDevice::new(
input.driver_source,
&resource.tag,
crate::SectionFs::new(root_path)?,
8 * 1024 * 1024 * 1024, // 8GB of shared memory,
None,
notify_corruption,
resource.num_request_queues,
)
}
#[cfg(not(windows))]
Expand Down
23 changes: 21 additions & 2 deletions vm/devices/virtio/virtiofs/src/virtio.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,9 @@ use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

/// Maximum number of request queues to advertise.
pub(crate) const MAX_REQUEST_QUEUES: u32 = 16;

/// PCI configuration space values for virtio-fs devices.
#[repr(C)]
#[derive(IntoBytes, Immutable, KnownLayout)]
Expand Down Expand Up @@ -58,19 +61,35 @@ pub struct VirtioFsDevice {

impl VirtioFsDevice {
/// Creates a new `VirtioFsDevice` with the specified mount tag.
///
/// `num_request_queues` controls how many virtio request queues the device
/// advertises. The Linux guest kernel (≥6.11) distributes I/O across
/// queues on a per-CPU basis, so this is best set to the guest vCPU count.
/// If `None`, defaults to the number of host CPUs (capped at
/// `MAX_REQUEST_QUEUES`). All values (including `Some(0)`) are clamped
/// to `1..=MAX_REQUEST_QUEUES`.
pub fn new<Fs>(
driver_source: &VmTaskDriverSource,
tag: &str,
fs: Fs,
shmem_size: u64,
notify_corruption: Option<Arc<dyn Fn() + Sync + Send>>,
num_request_queues: Option<u32>,
) -> Self
where
Fs: 'static + fuse::Fuse + Send + Sync,
{
let num_request_queues = num_request_queues
.unwrap_or_else(|| {
std::thread::available_parallelism()
.map(|n| n.get() as u32)
.unwrap_or(1)
})
.clamp(1, MAX_REQUEST_QUEUES);

let mut config = VirtioFsDeviceConfig {
tag: [0; 36],
num_request_queues: 1,
num_request_queues,
};

let notify_corruption = if let Some(notify) = notify_corruption {
Expand Down Expand Up @@ -104,7 +123,7 @@ impl VirtioDevice for VirtioFsDevice {
.with_ring_event_idx(true)
.with_ring_indirect_desc(true)
.with_ring_packed(true),
max_queues: 2,
max_queues: self.config.num_request_queues as u16 + 1,
device_register_length: self.config.as_bytes().len() as u32,
shared_memory: DeviceTraitsSharedMemory {
id: 0,
Expand Down
Loading