Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NVMe and resource disk testcase fixes #3432

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 18 additions & 11 deletions lisa/features/disks.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,7 @@

import re
from functools import partial
from typing import Any, Dict, List, Optional, Type

from assertpy.assertpy import assert_that
from typing import Any, Dict, List, Optional, Type, Union

from lisa import schema
from lisa.feature import Feature
Expand All @@ -27,22 +25,25 @@ def can_disable(cls) -> bool:
def enabled(self) -> bool:
return True

def get_partition_with_mount_point(
    self, mount_point: str
) -> Union[PartitionInfo, None]:
    """Return the partition mounted at ``mount_point``, or None.

    Returns None instead of asserting so that callers can probe for an
    optional mount (e.g. the resource disk) without failing the test run.
    """
    partition_info = self._node.tools[Mount].get_partition_info()
    matched_partitions = [
        partition
        for partition in partition_info
        if partition.mount_point == mount_point
    ]
    if matched_partitions:
        # Multiple matches are not expected; use the first one.
        partition = matched_partitions[0]
        self._log.debug(f"disk: {partition}, mount_point: {mount_point}")
        return partition
    else:
        return None
def check_resource_disk_mounted(self) -> bool:
    """Report whether the resource disk is mounted.

    Base implementation always reports False; platform-specific disk
    features (e.g. the Azure feature) override this with a real check.
    """
    return False

def get_raw_data_disks(self) -> List[str]:
raise NotImplementedError
Expand Down Expand Up @@ -71,6 +72,12 @@ def _initialize(self, *args: Any, **kwargs: Any) -> None:
def get_resource_disk_mount_point(self) -> str:
    """Return the resource disk mount point.

    Not implemented in the base feature; platform-specific subclasses
    must override.
    """
    raise NotImplementedError

def get_resource_disks(self) -> List[str]:
    """Return device names of resource disks; none by default.

    Platform-specific disk features override this to discover actual
    resource disks.
    """
    return []

def get_resource_disk_type(self) -> str:
    """Return the resource disk type string.

    NOTE(review): the empty-string default is ambiguous — defaulting to
    the SCSI type constant (or using an enum) would be clearer; confirm
    callers before changing the default.
    """
    return ""
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It should be RESOURCE_DISK_TYPE_SCSI by default. The empty string is confusing. Actually, an enumeration is better to prevent misuse of a type.


def get_luns(self) -> Dict[str, int]:
    """Return a mapping of device name to LUN.

    Not implemented in the base feature; platform-specific subclasses
    must override.
    """
    raise NotImplementedError

Expand Down
53 changes: 39 additions & 14 deletions lisa/features/nvme.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from lisa.tools import Ls, Lspci, Nvmecli
from lisa.tools.lspci import PciDevice
from lisa.util import field_metadata, get_matched_str
from lisa.util.constants import DEVICE_TYPE_NVME


class Nvme(Feature):
Expand All @@ -42,6 +43,9 @@ class Nvme(Feature):
# /dev/nvme0n1p15 -> /dev/nvme0n1
NVME_NAMESPACE_PATTERN = re.compile(r"/dev/nvme[0-9]+n[0-9]+", re.M)

# /dev/nvme0n1p15 -> /dev/nvme0n1
NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M)

_pci_device_name = "Non-Volatile memory controller"
_ls_devices: str = ""

Expand All @@ -63,7 +67,7 @@ def get_devices(self) -> List[str]:
matched_result = self._device_pattern.match(row)
if matched_result:
devices_list.append(matched_result.group("device_name"))
return devices_list
return self._remove_nvme_os_disk(devices_list)

def get_namespaces(self) -> List[str]:
namespaces = []
Expand All @@ -75,10 +79,28 @@ def get_namespaces(self) -> List[str]:
matched_result = self._namespace_pattern.match(row)
if matched_result:
namespaces.append(matched_result.group("namespace"))
return namespaces
return self._remove_nvme_os_disk(namespaces)

def _remove_nvme_os_disk(self, disk_list: List[str]) -> List[str]:
    """Drop the OS disk's NVMe device entry from ``disk_list``.

    With disk controller type NVMe, the OS disk (along with remote iSCSI
    devices) also appears as NVMe; removing the first entry that contains
    the OS disk's NVMe device name filters it out of the result. The list
    is mutated in place and returned.
    """
    os_controller_is_nvme = (
        self._node.features[Disk].get_os_disk_controller_type()
        == schema.DiskControllerType.NVME
    )
    if os_controller_is_nvme:
        os_disk_device = self._get_os_disk_nvme_device()
        for index, device in enumerate(disk_list):
            if os_disk_device in device:
                # Only the first matching entry is the OS disk.
                del disk_list[index]
                break
    return disk_list

def get_namespaces_from_cli(self) -> List[str]:
    """Return NVMe namespaces reported by nvme-cli, minus the OS disk."""
    namespaces_list = self._node.tools[Nvmecli].get_namespaces()
    return self._remove_nvme_os_disk(namespaces_list)

def get_os_disk_nvme_namespace(self) -> str:
node_disk = self._node.features[Disk]
Expand All @@ -93,10 +115,23 @@ def get_os_disk_nvme_namespace(self) -> str:
)
return os_partition_namespace

def _get_os_disk_nvme_device(self) -> str:
    """Return the NVMe device name backing the OS disk.

    Derived from the OS disk's NVMe namespace, e.g. /dev/nvme0n1p15
    yields /dev/nvme0. Returns "" when the OS disk has no NVMe
    namespace.
    """
    namespace = self.get_os_disk_nvme_namespace()
    if not namespace:
        return ""
    # /dev/nvme0n1p15 -> /dev/nvme0
    return get_matched_str(namespace, self.NVME_DEVICE_PATTERN)

def get_devices_from_lspci(self) -> List[PciDevice]:
devices_from_lspci = []
lspci_tool = self._node.tools[Lspci]
device_list = lspci_tool.get_devices()
device_list = lspci_tool.get_devices_by_type(DEVICE_TYPE_NVME)
devices_from_lspci = [
x for x in device_list if self._pci_device_name == x.device_class
]
Expand All @@ -108,16 +143,6 @@ def get_raw_data_disks(self) -> List[str]:
def get_raw_nvme_disks(self) -> List[str]:
    """Return local NVMe namespaces (data disks) as a list.

    get_namespaces() already excludes the OS disk when the disk
    controller type is NVMe, so no extra filtering is needed here.
    """
    return self.get_namespaces()

def _get_device_from_ls(self, force_run: bool = False) -> None:
Expand Down
36 changes: 36 additions & 0 deletions lisa/sut_orchestrator/azure/features.py
Original file line number Diff line number Diff line change
Expand Up @@ -1897,6 +1897,42 @@ def remove_data_disk(self, names: Optional[List[str]] = None) -> None:
self._node.capability.disk.data_disk_count -= len(names)
self._node.close()

def check_resource_disk_mounted(self) -> bool:
    """Verify that the resource disk is mounted.

    Returns True when a partition matching the resource disk mount point
    is present; raises LisaException otherwise. In Azure only SCSI
    resource disks are mounted by default, not NVMe ones.
    """
    resource_disk_mount_point = self.get_resource_disk_mount_point()
    mounted_partition = self.get_partition_with_mount_point(
        resource_disk_mount_point
    )
    if not mounted_partition:
        raise LisaException(
            f"Resource disk is not mounted at {resource_disk_mount_point}"
        )
    return True

def get_resource_disk_type(self) -> str:
    """Return the resource disk type of the VM.

    Yields the NVMe type constant when the first resource disk name
    contains it, otherwise the SCSI type constant. Raises LisaException
    when no resource disks are available.
    """
    resource_disks = self.get_resource_disks()
    if not resource_disks:
        raise LisaException("No Resource disks are available on VM")
    is_nvme = constants.RESOURCE_DISK_TYPE_NVME in resource_disks[0]
    return (
        constants.RESOURCE_DISK_TYPE_NVME
        if is_nvme
        else constants.RESOURCE_DISK_TYPE_SCSI
    )

def get_resource_disks(self) -> List[str]:
    """Return the resource disk device names of the VM.

    Prefers the partition mounted at the resource disk mount point
    (the SCSI case); falls back to the raw NVMe disk list when nothing
    is mounted there.
    """
    mount_point = self.get_resource_disk_mount_point()
    mounted_partition = self._node.features[Disk].get_partition_with_mount_point(
        mount_point
    )
    if mounted_partition:
        return [mounted_partition.name]
    return self._node.features[Nvme].get_raw_nvme_disks()

def get_resource_disk_mount_point(self) -> str:
# get customize mount point from cloud-init configuration file from /etc/cloud/
# if not found, use default mount point /mnt for cloud-init
Expand Down
4 changes: 4 additions & 0 deletions lisa/util/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,3 +178,7 @@
SIGINT = 2
SIGTERM = 15
SIGKILL = 9

# Resource disk types: substring markers matched against resource disk
# device names to classify them (see Disk.get_resource_disk_type).
RESOURCE_DISK_TYPE_NVME = "nvme"
RESOURCE_DISK_TYPE_SCSI = "scsi"
31 changes: 23 additions & 8 deletions microsoft/testsuites/core/azure_image_standard.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
PassedException,
SkippedException,
UnsupportedDistroException,
constants,
find_patterns_in_lines,
get_matched_str,
)
Expand Down Expand Up @@ -1115,11 +1116,19 @@ def verify_no_pre_exist_users(self, node: Node) -> None:
),
)
def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
if (
constants.RESOURCE_DISK_TYPE_NVME
== node.features[Disk].get_resource_disk_type()
):
raise SkippedException(
"Resource disk type is NVMe. NVMe disks are not formatted or mounted by"
" default and readme file wont be available"
)

# verify that resource disk is mounted
# function returns successfully if disk matching mount point is present
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
# verify that resource disk is mounted. raise exception if not
node.features[Disk].check_resource_disk_mounted()

resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()

# Verify lost+found folder exists
# Skip this step for BSD as it does not have lost+found folder
Expand Down Expand Up @@ -1159,13 +1168,19 @@ def verify_resource_disk_readme_file(self, node: RemoteNode) -> None:
),
)
def verify_resource_disk_file_system(self, node: RemoteNode) -> None:
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
node_disc = node.features[Disk]
if constants.RESOURCE_DISK_TYPE_NVME == node_disc.get_resource_disk_type():
raise SkippedException(
"Resource disk type is NVMe. NVMe disks are not formatted or mounted by default" # noqa: E501
)
# verify that resource disk is mounted. raise exception if not
node_disc.check_resource_disk_mounted()
resource_disk_mount_point = node_disc.get_resource_disk_mount_point()
disk_info = node.tools[Lsblk].find_disk_by_mountpoint(resource_disk_mount_point)
for partition in disk_info.partitions:
# by default, resource disk comes with ntfs type
# waagent or cloud-init will format it unless there are some commands hung
# or interrupt
# waagent or cloud-init will format it unless there are some commands
# hung or interrupt
assert_that(
partition.fstype,
"Resource disk file system type should not equal to ntfs",
Expand Down
45 changes: 33 additions & 12 deletions microsoft/testsuites/core/storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
BadEnvironmentStateException,
LisaException,
SkippedException,
constants,
generate_random_chars,
get_matched_str,
)
Expand Down Expand Up @@ -160,14 +161,24 @@ def verify_disks_device_timeout_setting(
),
)
def verify_resource_disk_mounted(self, node: RemoteNode) -> None:
if (
constants.RESOURCE_DISK_TYPE_NVME
== node.features[Disk].get_resource_disk_type()
):
raise SkippedException(
"Resource disk type is NVMe. NVMe disks are not mounted by default"
)

# get the mount point for the resource disk
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()
# os disk(root disk) is the entry with mount point `/' in the output
# of `mount` command
os_disk = (
node.features[Disk]
.get_partition_with_mount_point(self.os_disk_mount_point)
.disk
os_disk_partition = node.features[Disk].get_partition_with_mount_point(
self.os_disk_mount_point
)
if os_disk_partition:
os_disk = os_disk_partition.disk

if isinstance(node.os, BSD):
partition_info = node.tools[Mount].get_partition_info()
resource_disk_from_mtab = [
Expand Down Expand Up @@ -199,7 +210,7 @@ def verify_resource_disk_mounted(self, node: RemoteNode) -> None:
priority=1,
requirement=simple_requirement(
supported_platform_type=[AZURE],
unsupported_os=[BSD, Windows]
unsupported_os=[BSD, Windows],
# This test is skipped as waagent does not support freebsd fully
),
)
Expand Down Expand Up @@ -229,11 +240,19 @@ def verify_swap(self, node: RemoteNode) -> None:
),
)
def verify_resource_disk_io(self, node: RemoteNode) -> None:
if (
constants.RESOURCE_DISK_TYPE_NVME
== node.features[Disk].get_resource_disk_type()
):
raise SkippedException(
"Resource disk type is NVMe. NVMe has 'verify_nvme_function' and "
"'verify_nvme_function_unpartitioned' testcases to validate IO operations." # noqa: E501
)

resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point()

# verify that resource disk is mounted
# function returns successfully if disk matching mount point is present
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point)
# verify that resource disk is mounted. raise exception if not
node.features[Disk].check_resource_disk_mounted()

file_path = f"{resource_disk_mount_point}/sample.txt"
original_text = "Writing to resource disk!!!"
Expand Down Expand Up @@ -302,11 +321,13 @@ def verify_nvme_disk_controller_type(self, node: RemoteNode) -> None:
)
def verify_os_partition_identifier(self, log: Logger, node: RemoteNode) -> None:
# get information of root disk from blkid
os_partition = (
node.features[Disk]
.get_partition_with_mount_point(self.os_disk_mount_point)
.name
os_disk_partition = node.features[Disk].get_partition_with_mount_point(
self.os_disk_mount_point
)
if not os_disk_partition:
raise LisaException("Failed to get os disk partition")

os_partition = os_disk_partition.name
os_partition_info = node.tools[Blkid].get_partition_info_by_name(os_partition)

# check if cvm
Expand Down
Loading
Loading