Commit 8ef2c5d

Update vcpu index to u64 and fix tests

- Change PVTime::vcpu_count to u64 to avoid casts
- Fix integration tests to check steal_time increase

Signed-off-by: Dakshin Devanand <[email protected]>

1 parent 3ef992f commit 8ef2c5d

3 files changed: +34 -66 lines changed

src/vmm/src/arch/aarch64/pvtime.rs (+11 -13)
@@ -18,7 +18,7 @@ pub const STEALTIME_STRUCT_MEM_SIZE: u64 = 64;
 #[derive(Debug)]
 pub struct PVTime {
     /// Number of vCPUs
-    vcpu_count: u8,
+    vcpu_count: u64,
     /// The base IPA of the shared memory region
     base_ipa: GuestAddress,
 }
@@ -29,25 +29,25 @@ pub enum PVTimeError {
     /// Failed to allocate memory region: {0}
     AllocationFailed(vm_allocator::Error),
     /// Invalid VCPU ID: {0}
-    InvalidVcpuIndex(u8),
+    InvalidVcpuIndex(u64),
     /// Error while setting or getting device attributes for vCPU: {0}
     DeviceAttribute(kvm_ioctls::Error),
 }

 impl PVTime {
     /// Helper function to get the IPA of the steal_time region for a given vCPU
-    fn get_steal_time_region_addr(&self, vcpu_index: u8) -> Result<GuestAddress, PVTimeError> {
+    fn get_steal_time_region_addr(&self, vcpu_index: u64) -> Result<GuestAddress, PVTimeError> {
         if vcpu_index >= self.vcpu_count {
             return Err(PVTimeError::InvalidVcpuIndex(vcpu_index));
         }
         Ok(GuestAddress(
-            self.base_ipa.0 + (vcpu_index as u64 * STEALTIME_STRUCT_MEM_SIZE),
+            self.base_ipa.0 + (vcpu_index * STEALTIME_STRUCT_MEM_SIZE),
         ))
     }

     /// Create a new PVTime device given a base addr
     /// - Assumes total shared memory region from base addr is already allocated
-    fn from_base(base_ipa: GuestAddress, vcpu_count: u8) -> Self {
+    fn from_base(base_ipa: GuestAddress, vcpu_count: u64) -> Self {
         PVTime {
             vcpu_count,
             base_ipa,
@@ -57,13 +57,13 @@ impl PVTime {
     /// Creates a new PVTime device by allocating new system memory for all vCPUs
     pub fn new(
         resource_allocator: &mut ResourceAllocator,
-        vcpu_count: u8,
+        vcpu_count: u64,
     ) -> Result<Self, PVTimeError> {
         // This returns the IPA of the start of our shared memory region for all vCPUs.
         let base_ipa: GuestAddress = GuestAddress(
             resource_allocator
                 .allocate_system_memory(
-                    STEALTIME_STRUCT_MEM_SIZE * vcpu_count as u64,
+                    STEALTIME_STRUCT_MEM_SIZE * vcpu_count,
                     STEALTIME_STRUCT_MEM_SIZE,
                     vm_allocator::AllocPolicy::LastMatch,
                 )
@@ -89,7 +89,7 @@ impl PVTime {
     /// Register a vCPU with its pre-allocated steal time region
     fn register_vcpu(
         &self,
-        vcpu_index: u8,
+        vcpu_index: u64,
         vcpu_fd: &kvm_ioctls::VcpuFd,
     ) -> Result<(), PVTimeError> {
         // Get IPA of the steal_time region for this vCPU
@@ -114,9 +114,7 @@ impl PVTime {
     pub fn register_all_vcpus(&self, vcpus: &mut [Vcpu]) -> Result<(), PVTimeError> {
         // Register the vcpu with the pvtime device to map its steal time region
         for (i, vcpu) in vcpus.iter().enumerate() {
-            #[allow(clippy::cast_possible_truncation)]
-            // We know vcpu_count is u8 according to VcpuConfig
-            self.register_vcpu(i as u8, &vcpu.kvm_vcpu.fd)?;
+            self.register_vcpu(i as u64, &vcpu.kvm_vcpu.fd)?;
         }
         Ok(())
     }
@@ -135,7 +133,7 @@ pub struct PVTimeConstructorArgs<'a> {
    /// For steal_time shared memory region
    pub resource_allocator: &'a mut ResourceAllocator,
    /// Number of vCPUs (should be consistent with pre-snapshot state)
-   pub vcpu_count: u8,
+   pub vcpu_count: u64,
 }

 impl<'a> Persist<'a> for PVTime {
@@ -158,7 +156,7 @@ impl<'a> Persist<'a> for PVTime {
         constructor_args
             .resource_allocator
             .allocate_system_memory(
-                STEALTIME_STRUCT_MEM_SIZE * constructor_args.vcpu_count as u64,
+                STEALTIME_STRUCT_MEM_SIZE * constructor_args.vcpu_count,
                 STEALTIME_STRUCT_MEM_SIZE,
                 vm_allocator::AllocPolicy::ExactMatch(state.base_ipa),
             )
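The shared region is laid out as one 64-byte steal_time record per vCPU, so widening the index type only changes the offset arithmetic. A minimal standalone sketch of that layout math (the base address and function name below are illustrative, not the Firecracker types):

const STEALTIME_STRUCT_MEM_SIZE: u64 = 64;

// Hypothetical standalone analogue of get_steal_time_region_addr: compute the
// IPA of vCPU `vcpu_index`'s steal_time record inside the shared region.
fn steal_time_addr(base_ipa: u64, vcpu_index: u64, vcpu_count: u64) -> Option<u64> {
    if vcpu_index >= vcpu_count {
        return None; // corresponds to PVTimeError::InvalidVcpuIndex
    }
    // With vcpu_index already a u64, no `as u64` cast is needed here.
    Some(base_ipa + vcpu_index * STEALTIME_STRUCT_MEM_SIZE)
}

fn main() {
    // Illustrative base address only: vCPU 2's record sits at base + 2 * 64 bytes.
    assert_eq!(steal_time_addr(0x8000_0000, 2, 4), Some(0x8000_0080));
    // An out-of-range index is rejected rather than silently wrapping.
    assert_eq!(steal_time_addr(0x8000_0000, 4, 4), None);
}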

src/vmm/src/builder.rs (+5 -8)
@@ -11,8 +11,6 @@ use std::sync::{Arc, Mutex};
 use event_manager::{MutEventSubscriber, SubscriberOps};
 use libc::EFD_NONBLOCK;
 use linux_loader::cmdline::Cmdline as LoaderKernelCmdline;
-#[cfg(target_arch = "aarch64")]
-use log::warn;
 use userfaultfd::Uffd;
 use utils::time::TimestampUs;
 #[cfg(target_arch = "aarch64")]
@@ -303,7 +301,9 @@ pub fn build_microvm_for_boot(
         vmm.pv_time = if PVTime::is_supported(&vcpus[0].kvm_vcpu.fd) {
             Some(setup_pv_time(&mut vmm, vcpus.as_mut())?)
         } else {
-            warn!("PVTime is not supported by KVM. Steal time will not be reported to the guest.");
+            log::warn!(
+                "PVTime is not supported by KVM. Steal time will not be reported to the guest."
+            );
             None
         };
     }
@@ -484,11 +484,9 @@ pub fn build_microvm_from_snapshot(
     {
         let pvtime_state = microvm_state.pvtime_state;
         if let Some(pvtime_state) = pvtime_state {
-            #[allow(clippy::cast_possible_truncation)]
-            // We know vcpu_count is u8 according to VcpuConfig
             let pvtime_ctor_args = PVTimeConstructorArgs {
                 resource_allocator: &mut vmm.resource_allocator,
-                vcpu_count: vcpus.len() as u8,
+                vcpu_count: vcpus.len() as u64,
             };
             vmm.pv_time = Some(
                 PVTime::restore(pvtime_ctor_args, &pvtime_state)
@@ -603,8 +601,7 @@ fn setup_pv_time(vmm: &mut Vmm, vcpus: &mut [Vcpu]) -> Result<PVTime, StartMicrovmError> {
     use crate::arch::aarch64::pvtime::PVTime;

     // Create the pvtime device
-    #[allow(clippy::cast_possible_truncation)] // We know vcpu_count is u8 according to VcpuConfig
-    let pv_time = PVTime::new(&mut vmm.resource_allocator, vcpus.len() as u8)
+    let pv_time = PVTime::new(&mut vmm.resource_allocator, vcpus.len() as u64)
         .map_err(StartMicrovmError::CreatePVTime)?;

     // Register all vcpus with pvtime device
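The deleted #[allow(clippy::cast_possible_truncation)] attributes were needed because a usize-to-u8 cast can silently truncate, whereas usize-to-u64 is lossless on the 64-bit targets Firecracker runs on, so the lint no longer fires. A small sketch of the difference (assuming a 64-bit usize):

fn main() {
    let vcpu_count: usize = 300; // more vCPUs than a u8 can represent
    // `as u8` wraps modulo 256, which is what the clippy lint warns about:
    assert_eq!(vcpu_count as u8, 44);
    // `as u64` preserves the value on a 64-bit target, so no allowance is needed:
    assert_eq!(vcpu_count as u64, 300);
}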

tests/integration_tests/functional/test_pvtime.py renamed to tests/integration_tests/functional/test_steal_time.py (+18 -45)
@@ -3,14 +3,21 @@
 
 """Tests for verifying the PVTime device behavior under contention and across snapshots."""
 
-import re
 import time
 
 import pytest
 
 from framework.properties import global_props
 
 
+def get_steal_time_ms(vm):
+    """Returns total steal time of vCPUs in VM in milliseconds"""
+    _, out, _ = vm.ssh.run("grep -w '^cpu' /proc/stat")
+    steal_time_tck = int(out.strip().split()[8])
+    clk_tck = int(vm.ssh.run("getconf CLK_TCK").stdout)
+    return steal_time_tck / clk_tck * 1000
+
+
 @pytest.mark.skipif(
     global_props.cpu_architecture != "aarch64", reason="Only run in aarch64"
 )
@@ -43,35 +50,20 @@ def test_pvtime_steal_time_increases(uvm_plain):
     # Pin both vCPUs to the same physical CPU to induce contention
     vm.pin_vcpu(0, 0)
     vm.pin_vcpu(1, 0)
-    vm.pin_vmm(1)
-    vm.pin_api(2)
 
     # Start two infinite loops to hog CPU time
     hog_cmd = "nohup bash -c 'while true; do :; done' >/dev/null 2>&1 &"
     vm.ssh.run(hog_cmd)
     vm.ssh.run(hog_cmd)
 
+    # Measure before and after steal time
+    steal_before = get_steal_time_ms(vm)
     time.sleep(2)
-
-    # Measure steal time before
-    _, out_before, _ = vm.ssh.run("grep '^cpu[0-9]' /proc/stat")
-    steal_before = sum(
-        int(re.split(r"\s+", line.strip())[8])
-        for line in out_before.strip().splitlines()
-    )
-
-    time.sleep(2)
-
-    # Measure steal time after
-    _, out_after, _ = vm.ssh.run("grep '^cpu[0-9]' /proc/stat")
-    steal_after = sum(
-        int(re.split(r"\s+", line.strip())[8])
-        for line in out_after.strip().splitlines()
-    )
+    steal_after = get_steal_time_ms(vm)
 
     # Require increase in steal time
     assert (
-        steal_after - steal_before >= 200
+        steal_after > steal_before
     ), f"Steal time did not increase as expected. Before: {steal_before}, After: {steal_after}"
@@ -88,21 +80,13 @@ def test_pvtime_snapshot(uvm_plain, microvm_factory):
 
     vm.pin_vcpu(0, 0)
     vm.pin_vcpu(1, 0)
-    vm.pin_vmm(1)
-    vm.pin_api(2)
 
     hog_cmd = "nohup bash -c 'while true; do :; done' >/dev/null 2>&1 &"
     vm.ssh.run(hog_cmd)
     vm.ssh.run(hog_cmd)
 
-    time.sleep(1)
-
     # Snapshot pre-steal time
-    _, out_before_snap, _ = vm.ssh.run("grep '^cpu[0-9]' /proc/stat")
-    steal_before = [
-        int(re.split(r"\s+", line.strip())[8])
-        for line in out_before_snap.strip().splitlines()
-    ]
+    steal_before = get_steal_time_ms(vm)
 
     snapshot = vm.snapshot_full()
     vm.kill()
@@ -115,31 +99,20 @@ def test_pvtime_snapshot(uvm_plain, microvm_factory):
 
     restored_vm.pin_vcpu(0, 0)
     restored_vm.pin_vcpu(1, 0)
-    restored_vm.pin_vmm(1)
-    restored_vm.pin_api(2)
     restored_vm.resume()
 
-    time.sleep(1)
-
     # Steal time just after restoring
-    _, out_after_snap, _ = restored_vm.ssh.run("grep '^cpu[0-9]' /proc/stat")
-    steal_after_snap = [
-        int(re.split(r"\s+", line.strip())[8])
-        for line in out_after_snap.strip().splitlines()
-    ]
+    steal_after_snap = get_steal_time_ms(restored_vm)
 
     time.sleep(2)
 
     # Steal time after running resumed VM
-    _, out_after_resume, _ = restored_vm.ssh.run("grep '^cpu[0-9]' /proc/stat")
-    steal_after_resume = [
-        int(re.split(r"\s+", line.strip())[8])
-        for line in out_after_resume.strip().splitlines()
-    ]
+    steal_after_resume = get_steal_time_ms(restored_vm)
 
     # Ensure steal time persisted and continued increasing
-    persisted = sum(steal_before) + 100 <= sum(steal_after_snap)
-    increased = sum(steal_after_resume) > sum(steal_after_snap)
+    tolerance = 1500  # 1.5 seconds tolerance for persistence check
+    persisted = steal_after_snap - steal_before < tolerance
+    increased = steal_after_resume > steal_after_snap
 
     assert (
         persisted and increased
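The new get_steal_time_ms helper reads the aggregate "cpu" line of /proc/stat, where the ninth whitespace-separated field (index 8, counting the "cpu" label) is cumulative steal time in clock ticks, then converts ticks to milliseconds via CLK_TCK. A sketch of the same parsing and conversion in Rust, with a made-up sample line:

/// Parse the aggregate "cpu" line of /proc/stat and return steal time in ms.
/// Fields after the label: user nice system idle iowait irq softirq steal ...
fn steal_time_ms(cpu_line: &str, clk_tck: u64) -> f64 {
    let steal_ticks: u64 = cpu_line
        .split_whitespace()
        .nth(8) // the steal column, same index the test helper uses
        .expect("unexpectedly short /proc/stat cpu line")
        .parse()
        .expect("steal field is not numeric");
    steal_ticks as f64 / clk_tck as f64 * 1000.0
}

fn main() {
    // Made-up sample line; on a real system read /proc/stat and `getconf CLK_TCK`.
    let line = "cpu  4705 150 1120 16250 520 0 30 250 0 0";
    // 250 ticks at CLK_TCK = 100 -> 2500 ms of steal time.
    assert_eq!(steal_time_ms(line, 100), 2500.0);
}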
