tests: update unit tests for slurmconfig editor
* Use the from_dict method to create data models (see the sketch after the file header below).

Signed-off-by: Jason C. Nucciarone <[email protected]>
NucciTheBoss committed Aug 26, 2024
1 parent a446a68 commit 09564f6
Showing 1 changed file with 94 additions and 81 deletions.
175 changes: 94 additions & 81 deletions tests/unit/editors/test_slurmconfig.py
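
The commit message above notes the switch to from_dict for building data models. Below is a minimal sketch of that change, using only the API surface visible in the diff that follows (the Node model, its from_dict constructor, and its dict() method); the trimmed parameter set and the exact mapping returned by dict() are illustrative assumptions, not something this commit asserts.

from slurmutils.models import Node

# Keyword-argument construction, as the old test code built its node list.
keyword_node = Node(NodeName="juju-c9fc6f-2", NodeAddr="10.152.28.48", CPUs="1")

# from_dict construction, as the updated test code builds new_nodes: a nested
# mapping keyed by the node name.
dict_node = Node.from_dict({"juju-c9fc6f-2": {"NodeAddr": "10.152.28.48", "CPUs": "1"}})

# dict() converts the model back into that nested mapping, which is what the
# updated tests pass to config.nodes.update(...).
print(dict_node.dict())
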
@@ -19,7 +19,7 @@
 from pathlib import Path
 
 from slurmutils.editors import slurmconfig
-from slurmutils.models import DownNodes, DownNodesList, Node, NodeMap, Partition, PartitionMap
+from slurmutils.models import DownNodes, Node, Partition
 
 example_slurm_conf = """#
 # `slurm.conf` file generated at 2024-01-30 17:18:36.171652 by slurmutils.
@@ -87,32 +87,30 @@ def test_loads(self) -> None:
         self.assertEqual(config.slurmd_spool_dir, "/var/spool/slurmd.spool")
         self.assertEqual(config.scheduler_type, "sched/backfill")
 
-        nodes = config.nodes
-        for node in nodes:
-            self.assertIn(
-                node.node_name,
+        for name, params in config.nodes.items():
+            self.assertIn(  # codespell:ignore
+                name,
                 {"juju-c9fc6f-2", "juju-c9fc6f-3", "juju-c9fc6f-4", "juju-c9fc6f-5"},
             )
-            self.assertIn(
-                node.node_addr, {"10.152.28.48", "10.152.28.49", "10.152.28.50", "10.152.28.51"}
+            self.assertIn(  # codespell:ignore
+                params["NodeAddr"],
+                {"10.152.28.48", "10.152.28.49", "10.152.28.50", "10.152.28.51"},
             )
-            self.assertEqual(node.cpus, "1")
-            self.assertEqual(node.real_memory, "1000")
-            self.assertEqual(node.tmp_disk, "10000")
+            self.assertEqual(params["CPUs"], "1")
+            self.assertEqual(params["RealMemory"], "1000")
+            self.assertEqual(params["TmpDisk"], "10000")
 
-        down_nodes = config.down_nodes
-        for entry in down_nodes:
-            self.assertEqual(entry.down_nodes[0], "juju-c9fc6f-5")
-            self.assertEqual(entry.state, "DOWN")
-            self.assertEqual(entry.reason, "Maintenance Mode")
+        for entry in config.down_nodes:
+            self.assertEqual(entry["DownNodes"][0], "juju-c9fc6f-5")
+            self.assertEqual(entry["State"], "DOWN")
+            self.assertEqual(entry["Reason"], "Maintenance Mode")
 
-        partitions = config.partitions
-        for part in partitions:
-            self.assertIn(part.partition_name, {"DEFAULT", "batch"})
+        for partition in config.partitions:
+            self.assertIn(partition, {"DEFAULT", "batch"})  # codespell:ignore
 
-        batch = partitions["batch"]
+        batch = config.partitions["batch"]
         self.assertListEqual(
-            batch.nodes, ["juju-c9fc6f-2", "juju-c9fc6f-3", "juju-c9fc6f-4", "juju-c9fc6f-5"]
+            batch["Nodes"], ["juju-c9fc6f-2", "juju-c9fc6f-3", "juju-c9fc6f-4", "juju-c9fc6f-5"]
         )
 
     def test_dumps(self) -> None:
@@ -130,10 +128,9 @@ def test_edit(self) -> None:
             config.max_job_count = 20000
             config.proctrack_type = "proctrack/linuxproc"
             config.plugin_dir.append("/snap/slurm/current/plugins")
-            node = config.nodes["juju-c9fc6f-2"]
+            new_node = Node(NodeName="batch-0", **config.nodes["juju-c9fc6f-2"])
             del config.nodes["juju-c9fc6f-2"]
-            node.node_name = "batch-0"
-            config.nodes[node.node_name] = node
+            config.nodes.update(new_node.dict())
 
         config = slurmconfig.load("slurm.conf")
         self.assertIsNone(config.inactive_limit)
@@ -143,9 +140,8 @@
             config.plugin_dir,
             ["/usr/local/lib", "/usr/local/slurm/lib", "/snap/slurm/current/plugins"],
         )
-        self.assertEqual(config.nodes["batch-0"].node_addr, "10.152.28.48")
+        self.assertEqual(config.nodes["batch-0"]["NodeAddr"], "10.152.28.48")
 
-        # Test pocket (`nodes`, `frontend_nodes`, ...) descriptors.
         with slurmconfig.edit("slurm.conf") as config:
             del config.nodes
             del config.frontend_nodes
@@ -154,74 +150,91 @@
             del config.partitions
 
         config = slurmconfig.load("slurm.conf")
-        self.assertDictEqual(config.nodes.data, {})
-        self.assertDictEqual(config.frontend_nodes.data, {})
-        self.assertListEqual(config.down_nodes.data, [])
-        self.assertDictEqual(config.node_sets.data, {})
-        self.assertDictEqual(config.partitions.data, {})
-
-        node_list = [
-            Node(
-                NodeName="juju-c9fc6f-2",
-                NodeAddr="10.152.28.48",
-                CPUs="1",
-                RealMemory="1000",
-                TmpDisk="10000",
+        self.assertDictEqual(config.nodes, {})
+        self.assertDictEqual(config.frontend_nodes, {})
+        self.assertListEqual(config.down_nodes, [])
+        self.assertDictEqual(config.node_sets, {})
+        self.assertDictEqual(config.partitions, {})
+
+        new_nodes = [
+            Node.from_dict(
+                {
+                    "juju-c9fc6f-2": {
+                        "NodeAddr": "10.152.28.48",
+                        "CPUs": "1",
+                        "RealMemory": "1000",
+                        "TmpDisk": "10000",
+                    }
+                }
             ),
-            Node(
-                NodeName="juju-c9fc6f-3",
-                NodeAddr="10.152.28.49",
-                CPUs="1",
-                RealMemory="1000",
-                TmpDisk="10000",
+            Node.from_dict(
+                {
+                    "juju-c9fc6f-3": {
+                        "NodeAddr": "10.152.28.49",
+                        "CPUs": "1",
+                        "RealMemory": "1000",
+                        "TmpDisk": "10000",
+                    }
+                }
             ),
-            Node(
-                NodeName="juju-c9fc6f-4",
-                NodeAddr="10.152.28.50",
-                CPUs="1",
-                RealMemory="1000",
-                TmpDisk="10000",
+            Node.from_dict(
+                {
+                    "juju-c9fc6f-4": {
+                        "NodeAddr": "10.152.28.50",
+                        "CPUs": "1",
+                        "RealMemory": "1000",
+                        "TmpDisk": "10000",
+                    }
+                }
             ),
-            Node(
-                NodeName="juju-c9fc6f-5",
-                NodeAddr="10.152.28.51",
-                CPUs="1",
-                RealMemory="1000",
-                TmpDisk="10000",
+            Node.from_dict(
+                {
+                    "juju-c9fc6f-5": {
+                        "NodeAddr": "10.152.28.51",
+                        "CPUs": "1",
+                        "RealMemory": "1000",
+                        "TmpDisk": "10000",
+                    }
+                }
             ),
         ]
-        down_nodes = [
-            DownNodes(DownNodes=["juju-c9fc6f-5"], State="DOWN", Reason="Maintenance Mode")
+        new_down_nodes = [
+            DownNodes.from_dict(
+                {
+                    "DownNodes": ["juju-c9fc6f-5"],
+                    "State": "DOWN",
+                    "Reason": "Maintenance Mode",
+                }
+            )
         ]
-        partition_list = [
-            Partition(PartitionName="DEFAULT", MaxTime="30", MaxNodes="10", State="UP"),
-            Partition(
-                PartitionName="batch",
-                Nodes=["juju-c9fc6f-2", "juju-c9fc6f-3", "juju-c9fc6f-4", "juju-c9fc6f-5"],
-                MinNodes="4",
-                MaxTime="120",
-                AllowGroups=["admin"],
+        new_partitions = [
+            Partition.from_dict({"DEFAULT": {"MaxTime": "30", "MaxNodes": "10", "State": "UP"}}),
+            Partition.from_dict(
+                {
+                    "batch": {
+                        "Nodes": [
+                            "juju-c9fc6f-2",
+                            "juju-c9fc6f-3",
+                            "juju-c9fc6f-4",
+                            "juju-c9fc6f-5",
+                        ],
+                        "MinNodes": "4",
+                        "MaxTime": "120",
+                        "AllowGroups": ["admin"],
+                    }
+                }
            ),
        ]
 
         with slurmconfig.edit("slurm.conf") as config:
-            node_map = NodeMap()
-            for node in node_list:
-                node_map[node.node_name] = node
-            config.nodes = node_map
-
-            down_nodes_list = DownNodesList()
-            down_nodes_list.extend(down_nodes)
-            config.down_nodes = down_nodes_list
+            for node in new_nodes:
+                config.nodes.update(node.dict())
 
-            partition_map = PartitionMap()
-            for part in partition_list:
-                partition_map[part.partition_name] = part
-            config.partitions = partition_map
+            for down_node in new_down_nodes:
+                config.down_nodes.append(down_node.dict())
 
-        config = slurmconfig.load("slurm.conf")
-        with self.assertRaises(TypeError):
-            config.frontend_nodes = "yowzah"
+            for partition in new_partitions:
+                config.partitions.update(partition.dict())
 
     def tearDown(self):
         Path("slurm.conf").unlink()

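For reference, a hedged sketch of the edit-and-reload round trip that the updated test_edit exercises, restricted to calls that appear in the diff above (slurmconfig.edit, slurmconfig.load, Node.from_dict, dict(), and config.nodes.update). It assumes a slurm.conf already exists on disk, as it does while the tests run; the node name and parameter values are illustrative only.

from slurmutils.editors import slurmconfig
from slurmutils.models import Node

# Build a node model from a nested mapping keyed by the node name.
node = Node.from_dict(
    {"example-node-0": {"NodeAddr": "10.0.0.10", "CPUs": "1", "RealMemory": "1000"}}
)

# Merge it into the configuration inside the editor context, then reload the
# file and read it back through the dict-style accessors the new assertions use.
with slurmconfig.edit("slurm.conf") as config:
    config.nodes.update(node.dict())

config = slurmconfig.load("slurm.conf")
print(config.nodes["example-node-0"]["NodeAddr"])  # expected: "10.0.0.10"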