Adding a total_iodepth option that can be used with librbdfio #324

Draft: wants to merge 3 commits into master

77 changes: 71 additions & 6 deletions benchmark/librbdfio.py
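In short: instead of giving every RBD volume the same per-volume iodepth, a workload can now specify total_iodepth, which is divided across volumes_per_client; if the total is less than one per volume, fewer volumes are used. As a sketch, a test definition as run_workloads() would receive it might look like this (a plain dict containing only the keys the loop reads; the values are illustrative and not part of the change):

test = {
    "mode": "randread",
    "op_size": 4096,
    "numjobs": [1],
    "total_iodepth": [64, 128],  # split across volumes_per_client instead of a per-volume iodepth list
    "monitor": True,
}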
@@ -9,6 +9,7 @@
import settings
import monitoring

from typing import Optional
from .benchmark import Benchmark

logger = logging.getLogger("cbt")
@@ -50,6 +51,14 @@ def __init__(self, archive_dir, cluster, config):
self.rate_iops = config.get('rate_iops', None)
self.fio_out_format = config.get('fio_out_format', 'json,normal')
self.data_pool = None

self._iodepth_per_volume: dict[int, int] = {}
total_iodepth: Optional[str] = config.get("total_iodepth", None)
if total_iodepth is not None:
self._iodepth_per_volume = self._calculate_iodepth_per_volume(
int(self.volumes_per_client), int(total_iodepth)
)

# use_existing_volumes needs to be true to set the pool and rbd names
self.use_existing_volumes = bool(config.get('use_existing_volumes', False))
self.no_sudo = bool(config.get('no_sudo', False))
@@ -163,7 +172,18 @@ def run_workloads(self):
enable_monitor = bool(test['monitor'])
# TODO: simplify this loop to have a single iterator for general queue depth
for job in test['numjobs']:
for iod in test['iodepth']:
iodepth: list[str] = []
use_total_iodepth: bool = False
if "total_iodepth" in test.keys():
iodepth = test["total_iodepth"]
use_total_iodepth = True
else:
iodepth = test["iodepth"]
for iod in iodepth:
if use_total_iodepth:
self._iodepth_per_volume = self._calculate_iodepth_per_volume(
int(self.volumes_per_client), int(iod)
)
self.mode = test['mode']
if 'op_size' in test:
self.op_size = test['op_size']
@@ -174,7 +194,10 @@
f'iodepth-{int(self.iodepth):03d}/numjobs-{int(self.numjobs):03d}' )
common.make_remote_dir(self.run_dir)

for i in range(self.volumes_per_client):
number_of_volumes: int = int(self.volumes_per_client)
if use_total_iodepth:
number_of_volumes = len(self._iodepth_per_volume)
for i in range(number_of_volumes):
fio_cmd = self.mkfiocmd(i)
p = common.pdsh(settings.getnodes('clients'), fio_cmd)
ps.append(p)
@@ -226,7 +249,10 @@ def run(self):
monitoring.start(self.run_dir)
logger.info('Running rbd fio %s test.', self.mode)
ps = []
for i in range(self.volumes_per_client):
number_of_volumes: int = int(self.volumes_per_client)
if self._iodepth_per_volume:
number_of_volumes = len(self._iodepth_per_volume)
for i in range(number_of_volumes):
fio_cmd = self.mkfiocmd(i)
p = common.pdsh(settings.getnodes('clients'), fio_cmd)
ps.append(p)
@@ -244,7 +270,7 @@ def run(self):
self.analyze(self.out_dir)


def mkfiocmd(self, volnum):
def mkfiocmd(self, volnum: int) -> str:
"""
Construct a FIO cmd (note the shell interpolation for the host
executing FIO).
@@ -257,7 +283,7 @@ def mkfiocmd(self, volnum):
logger.debug('Using rbdname %s', rbdname)
out_file = f'{self.run_dir}/output.{volnum:d}'

fio_cmd = ''
fio_cmd: str = ''
if not self.no_sudo:
fio_cmd = 'sudo '
fio_cmd += '%s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0' % (self.cmd_path, self.pool_name, rbdname)
@@ -274,7 +300,12 @@
fio_cmd += ' --numjobs=%s' % self.numjobs
fio_cmd += ' --direct=1'
fio_cmd += ' --bs=%dB' % self.op_size
fio_cmd += ' --iodepth=%d' % self.iodepth

iodepth: str = f"{self.iodepth}"
if self._iodepth_per_volume:
iodepth = f"{self._iodepth_per_volume[volnum]}"

fio_cmd += ' --iodepth=%s' % iodepth
fio_cmd += ' --end_fsync=%d' % self.end_fsync
# if self.vol_size:
# fio_cmd += ' -- size=%dM' % self.vol_size
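For illustration (assuming volumes_per_client = 4, total_iodepth = 10 and a 4 KiB op_size): the helper added below yields {0: 3, 1: 3, 2: 2, 3: 2}, so the commands built here differ only in the per-volume --iodepth; the fio binary, pool and RBD names are placeholders:

sudo fio --ioengine=rbd --clientname=admin --pool=<pool> --rbdname=<rbd-volume-0> --invalidate=0 ... --direct=1 --bs=4096B --iodepth=3 ...
sudo fio --ioengine=rbd --clientname=admin --pool=<pool> --rbdname=<rbd-volume-3> --invalidate=0 ... --direct=1 --bs=4096B --iodepth=2 ...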
@@ -401,6 +432,40 @@ def analyze(self, out_dir):
logger.info('Convert results to json format.')
self.parse(out_dir)

def _calculate_iodepth_per_volume(self, number_of_volumes: int, total_desired_iodepth: int) -> dict[int, int]:
"""
Given the total desired iodepth and the number of volumes from the
configuration yaml file, calculate the iodepth for each volume

If the iodepth specified in total_iodepth is too small to allow
an iodepth of 1 per volume, then reduce the number of volumes
used to allow an iodepth of 1 per volume.
"""
queue_depths: dict[int, int] = {}

if number_of_volumes > total_desired_iodepth:
logger.warning(
"The total iodepth requested: %s is less than 1 per volume (%s)",
total_desired_iodepth,
number_of_volumes,
)
logger.warning(
"Number of volumes per client will be reduced from %s to %s", number_of_volumes, total_desired_iodepth
)
number_of_volumes = total_desired_iodepth

iodepth_per_volume: int = total_desired_iodepth // number_of_volumes
remainder: int = total_desired_iodepth % number_of_volumes

for volume_id in range(number_of_volumes):
iodepth: int = iodepth_per_volume

if remainder > 0:
iodepth += 1
remainder -= 1
queue_depths[volume_id] = iodepth

return queue_depths

def __str__(self):
return "%s\n%s\n%s" % (self.run_dir, self.out_dir, super(LibrbdFio, self).__str__())