
Commit

Merge branch 'develop' into reports_v2
rfbgo committed Nov 15, 2024
2 parents ee47c53 + 6113835 commit 0c2fbf9
Showing 46 changed files with 2,537 additions and 1,145 deletions.
5 changes: 0 additions & 5 deletions .coveragerc

This file was deleted.

7 changes: 4 additions & 3 deletions README.md
@@ -1,6 +1,7 @@
Ramble is a multi-platform experimentation framework to increase exploration
productivity and improve reproducibility. Ramble is capable of driving software
installation, acquire input files, configure experiments, and extract results.
Ramble is a multi-platform experimentation framework that increases exploration
productivity and improves reproducibility. Ramble is capable of driving
software installation, acquiring input files, configuring experiments, and
extracting results.
It works on Linux, macOS, and many supercomputers.

Ramble can be used to configure a variety of experiments for applications.
2 changes: 1 addition & 1 deletion examples/basic_hostname_config.yaml
@@ -13,7 +13,7 @@ ramble:
variables:
mpi_command: mpirun
batch_submit: '{execute_experiment}'
processes_per_node: -1
processes_per_node: 1
applications:
hostname:
workloads:
4 changes: 2 additions & 2 deletions lib/ramble/docs/tutorials/mirrors.rst
@@ -54,7 +54,7 @@ Write the following configuration into the file, save, and exit:
variables:
mpi_command: mpirun -n {n_ranks}
batch_submit: '{execute_experiment}'
processes_per_node: -1
processes_per_node: 1
applications:
wrfv4:
variables:
@@ -84,7 +84,7 @@ will look something like this:
variables:
mpi_command: mpirun -n {n_ranks}
batch_submit: '{execute_experiment}'
processes_per_node: -1
processes_per_node: 1
applications:
wrfv4:
variables:
31 changes: 21 additions & 10 deletions lib/ramble/ramble/application.py
@@ -16,6 +16,7 @@
import shutil
import fnmatch
import time
import operator
from typing import List

import llnl.util.filesystem as fs
@@ -107,6 +108,7 @@ class ApplicationBase(metaclass=ApplicationMeta):
"pushdeployment",
"pushtocache",
"execute",
"logs",
]
_language_classes = [ApplicationMeta, SharedMeta]

@@ -1489,6 +1491,7 @@ def populate_inventory(self, workspace, force_compute=False, require_exist=False
self.package_manager.populate_inventory(workspace, force_compute, require_exist)

self.experiment_hash = ramble.util.hashing.hash_json(self.hash_inventory)
self.variables[self.keywords.experiment_hash] = self.experiment_hash

register_phase("write_inventory", pipeline="setup", run_after=["make_experiments"])

@@ -1827,7 +1830,7 @@ def is_numeric_fom(fom):
# If repeat_success_strict is true, one failed experiment will fail the whole set
# If repeat_success_strict is false, any passing experiment will pass the whole set
repeat_success = False
exp_success = []
exp_status_list = []
for exp in repeat_experiments.keys():
if exp in self.experiment_set.experiments.keys():
exp_inst = self.experiment_set.experiments[exp]
@@ -1836,15 +1839,15 @@ def is_numeric_fom(fom):
else:
continue

exp_success.append(exp_inst.get_status())
exp_status_list.append(exp_inst.get_status())

if workspace.repeat_success_strict:
if experiment_status.FAILED.name in exp_success:
if experiment_status.FAILED.name in exp_status_list:
repeat_success = False
else:
repeat_success = True
else:
if experiment_status.SUCCESS.name in exp_success:
if experiment_status.SUCCESS.name in exp_status_list:
repeat_success = True
else:
repeat_success = False
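
For reference, a standalone sketch of the aggregation rule above (plain Python, not the Ramble API): with repeat_success_strict enabled, any failed repeat fails the whole set, otherwise a single successful repeat is enough.

```python
# Standalone sketch of the repeat-set aggregation above; not the Ramble API.
def repeat_set_success(statuses, strict):
    """Return True if a set of repeat experiments counts as successful."""
    if strict:
        # Strict mode: any failed repeat fails the whole set.
        return "FAILED" not in statuses
    # Non-strict mode: any passing repeat passes the whole set.
    return "SUCCESS" in statuses


print(repeat_set_success(["SUCCESS", "FAILED"], strict=True))   # False
print(repeat_set_success(["SUCCESS", "FAILED"], strict=False))  # True
```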
@@ -1919,27 +1922,30 @@ def is_numeric_fom(fom):
"display_name": _get_context_display_name(context),
}

summary_foms = []
if context == _NULL_CONTEXT:
# Use the app name as the origin of the FOM
summary_origin = self.name
n_total_dict = {
"value": self.repeats.n_repeats,
"units": "repeats",
"origin": list(fom_dict.keys())[0][2],
"origin": summary_origin,
"origin_type": "summary::n_total_repeats",
"name": "Experiment Summary",
"fom_type": FomType.MEASURE.to_dict(),
}
context_map["foms"].append(n_total_dict)
summary_foms.append(n_total_dict)

# Use the first FOM to count how many successful repeats values are present
n_success = exp_status_list.count("SUCCESS")
n_success_dict = {
"value": exp_success.count(experiment_status.SUCCESS.name),
"value": n_success,
"units": "repeats",
"origin": list(fom_dict.keys())[0][2],
"origin": summary_origin,
"origin_type": "summary::n_successful_repeats",
"name": "Experiment Summary",
"fom_type": FomType.MEASURE.to_dict(),
}
context_map["foms"].append(n_success_dict)
summary_foms.append(n_success_dict)

for fom_key, fom_contents in fom_dict.items():
fom_name, fom_units, fom_origin, fom_origin_type = fom_key
@@ -1981,6 +1987,11 @@ def is_numeric_fom(fom):
else:
continue

# Display the FOMs in alphabetic order, even if the corresponding log entries
# may be in different ordering.
context_map["foms"].sort(key=operator.itemgetter("name"))
if context == _NULL_CONTEXT:
context_map["foms"] = summary_foms + context_map["foms"]
results.append(context_map)

if results:
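
A toy illustration of the ordering change above (made-up FOM names, not real results): FOM entries are sorted alphabetically by name, and for the null context the summary FOMs are prepended so they stay first.

```python
import operator

# Toy illustration of the FOM ordering above; data is made up.
summary_foms = [{"name": "Experiment Summary"}]
foms = [{"name": "Throughput"}, {"name": "Elapsed time"}]

foms.sort(key=operator.itemgetter("name"))  # alphabetical by FOM name
foms = summary_foms + foms                  # summary entries stay in front

print([f["name"] for f in foms])
# ['Experiment Summary', 'Elapsed time', 'Throughput']
```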
2 changes: 1 addition & 1 deletion lib/ramble/ramble/cmd/deployment.py
@@ -14,7 +14,6 @@
import spack.util.url as surl

import ramble.cmd
import ramble.cmd.common.arguments
import ramble.cmd.common.arguments as arguments

import ramble.fetch_strategy
@@ -102,6 +101,7 @@ def deployment_pull_setup_parser(subparser):
"-p",
dest="deployment_path",
help="Path to deployment that should be pulled",
required=True,
)


2 changes: 1 addition & 1 deletion lib/ramble/ramble/cmd/on.py
@@ -72,7 +72,7 @@ def ramble_on(args):
suppress_run_header=suppress_run_header,
)

with ws.write_transaction():
with ws.read_transaction():
pipeline.run()


55 changes: 53 additions & 2 deletions lib/ramble/ramble/cmd/workspace.py
@@ -51,6 +51,7 @@
"info",
"edit",
"mirror",
"experiment-logs",
["list", "ls"],
["remove", "rm"],
"generate-config",
@@ -483,7 +484,7 @@ def workspace_setup(args):
logger.debug("Setting up workspace")
pipeline = pipeline_cls(ws, filters)

with ws.write_transaction():
with ws.read_transaction():
workspace_run_pipeline(args, pipeline)


@@ -554,7 +555,7 @@ def workspace_analyze(args):
summary_only=args.summary_only,
)

with ws.write_transaction():
with ws.read_transaction():
workspace_run_pipeline(args, pipeline)


@@ -1293,6 +1294,56 @@ def workspace_generate_config(args):
workspace_manage_experiments(args)


def workspace_experiment_logs_setup_parser(subparser):
"""print log information for workspace"""
default_filters = subparser.add_mutually_exclusive_group()
default_filters.add_argument(
"--limit-one", action="store_true", help="only print the first log information block"
)

default_filters.add_argument(
"--first-failed",
action="store_true",
help="only print the information for the first failed experiment. "
+ "Requires `ramble workspace analyze` to have been run previously",
)

default_filters.add_argument(
"--failed", action="store_true", help="print only failed experiment logs"
)

arguments.add_common_arguments(
subparser,
["where", "exclude_where", "filter_tags"],
)


def workspace_experiment_logs(args):
"""Print log information for workspace"""

current_pipeline = ramble.pipeline.pipelines.logs
ws = ramble.cmd.require_active_workspace(cmd_name="workspace concretize")

first_only = args.limit_one or args.first_failed
where_filter = args.where.copy() if args.where else []
exclude_filter = args.exclude_where.copy() if args.exclude_where else []
only_failed = args.first_failed or args.failed

if only_failed:
exclude_filter.append(["'{experiment_status}' == 'SUCCESS'"])

filters = ramble.filters.Filters(
include_where_filters=where_filter,
exclude_where_filters=exclude_filter,
tags=args.filter_tags,
)

pipeline_cls = ramble.pipeline.pipeline_class(current_pipeline)
pipeline = pipeline_cls(ws, filters, first_only=first_only)
with ws.write_transaction():
workspace_run_pipeline(args, pipeline)
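
A standalone sketch of the flag handling in workspace_experiment_logs above (a plain dict stands in for the parsed argparse namespace): the failure-oriented flags add an exclusion on {experiment_status}, and --limit-one / --first-failed restrict output to the first matching experiment.

```python
# Standalone sketch of the flag handling above; a plain dict stands in for
# the parsed argparse namespace.
def build_log_filters(args):
    exclude = list(args.get("exclude_where") or [])
    if args.get("failed") or args.get("first_failed"):
        # Failed-only views exclude experiments that already succeeded.
        exclude.append(["'{experiment_status}' == 'SUCCESS'"])
    first_only = bool(args.get("limit_one") or args.get("first_failed"))
    return exclude, first_only


print(build_log_filters({"failed": True}))
# ([["'{experiment_status}' == 'SUCCESS'"]], False)
print(build_log_filters({"first_failed": True}))
# ([["'{experiment_status}' == 'SUCCESS'"]], True)
```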


#: Dictionary mapping subcommand names and aliases to functions
subcommand_functions = {}

4 changes: 2 additions & 2 deletions lib/ramble/ramble/expander.py
@@ -910,9 +910,9 @@ def _eval_susbscript_op(self, node):
"""Evaluate subscript operation in the ast"""
try:
operand = self.eval_math(node.value)
if not isinstance(operand, str):
raise SyntaxError("Currently only string slicing is supported for subscript")
slice_node = node.slice
if not isinstance(operand, str) or not isinstance(slice_node, ast.Slice):
raise SyntaxError("Currently only string slicing is supported for subscript")

def _get_with_default(s_node, attr, default):
v_node = getattr(s_node, attr)
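
The guard above relies on how the ast module represents subscripts; a small standalone check (independent of the expander itself) shows that a slice expression parses to ast.Slice while a plain index does not, so only string slicing passes.

```python
import ast

# Standalone check of the distinction used by the guard above: a slice
# subscript parses to ast.Slice, a plain index does not.
slice_expr = ast.parse("s[1:3]", mode="eval").body
index_expr = ast.parse("s[1]", mode="eval").body

assert isinstance(slice_expr.slice, ast.Slice)       # slicing is allowed
assert not isinstance(index_expr.slice, ast.Slice)   # indexing is rejected
```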
6 changes: 6 additions & 0 deletions lib/ramble/ramble/experiment_set.py
@@ -214,12 +214,18 @@ def _compute_mpi_vars(self, expander, variables):

if n_ranks:
n_ranks = int(expander.expand_var(n_ranks))
if n_ranks <= 0:
logger.error("n_ranks must be positive")

if ppn:
ppn = int(expander.expand_var(ppn))
if ppn <= 0:
logger.error("processes_per_node must be positive")

if n_nodes:
n_nodes = int(expander.expand_var(n_nodes))
if n_nodes <= 0:
logger.error("n_nodes must be positive")

if n_threads:
n_threads = int(expander.expand_var(n_threads))
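
As intuition for why non-positive values are rejected, a hedged sketch of the assumed relationships between these variables (the actual derivation in _compute_mpi_vars may differ in details): missing values are derived from the others, so a zero or negative processes_per_node would break the arithmetic.

```python
import math

# Hedged sketch of the assumed relationships; the real _compute_mpi_vars
# derivation may differ in details.
def derive(n_ranks=None, processes_per_node=None, n_nodes=None):
    if n_ranks is None and processes_per_node and n_nodes:
        n_ranks = processes_per_node * n_nodes
    if n_nodes is None and n_ranks and processes_per_node:
        n_nodes = math.ceil(n_ranks / processes_per_node)
    return n_ranks, processes_per_node, n_nodes


print(derive(processes_per_node=4, n_nodes=2))   # (8, 4, 2)
print(derive(n_ranks=10, processes_per_node=4))  # (10, 4, 3)
```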
1 change: 1 addition & 0 deletions lib/ramble/ramble/keywords.py
@@ -28,6 +28,7 @@
"license_input_dir": {"type": key_type.reserved, "level": output_level.variable},
"experiments_file": {"type": key_type.reserved, "level": output_level.key},
"experiment_name": {"type": key_type.reserved, "level": output_level.key},
"experiment_hash": {"type": key_type.reserved, "level": output_level.key},
"experiment_run_dir": {"type": key_type.reserved, "level": output_level.variable},
"experiment_status": {"type": key_type.reserved, "level": output_level.key},
"experiment_index": {"type": key_type.reserved, "level": output_level.variable},
36 changes: 28 additions & 8 deletions lib/ramble/ramble/language/application_language.py
@@ -108,6 +108,10 @@ def _execute_workload_groups(app):
for var in app.workload_group_vars[name]:
app.workloads[workload].add_variable(var)

if name in app.workload_group_env_vars:
for env_var in app.workload_group_env_vars[name]:
app.workloads[workload].add_environment_variable(env_var)

return _execute_workload_groups


@@ -260,8 +264,10 @@ def _execute_workload_variable(app):
return _execute_workload_variable


@application_directive(dicts=())
def environment_variable(name, value, description, workload=None, workloads=None, **kwargs):
@application_directive("workload_group_env_vars")
def environment_variable(
name, value, description, workload=None, workloads=None, workload_group=None, **kwargs
):
"""Define an environment variable to be used in experiments
Args:
@@ -274,15 +280,29 @@ def _execute_environment_variable(app):
"""

def _execute_environment_variable(app):
all_workloads = ramble.language.language_helpers.require_definition(
all_workloads = ramble.language.language_helpers.merge_definitions(
workload, workloads, app.workloads, "workload", "workloads", "environment_variable"
)

workload_env_var = ramble.workload.WorkloadEnvironmentVariable(
name, value=value, description=description
)

for wl_name in all_workloads:
app.workloads[wl_name].add_environment_variable(
ramble.workload.WorkloadEnvironmentVariable(
name, value=value, description=description
)
)
app.workloads[wl_name].add_environment_variable(workload_env_var.copy())

if workload_group is not None:
workload_group_list = app.workload_groups[workload_group]

if workload_group not in app.workload_group_env_vars:
app.workload_group_env_vars[workload_group] = []

app.workload_group_vars[workload_group].append(workload_env_var.copy())

for wl_name in workload_group_list:
app.workloads[wl_name].add_environment_variable(workload_env_var.copy())

if not all_workloads and workload_group is None:
raise DirectiveError("A workload or workload group is required")

return _execute_environment_variable
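
A hypothetical use of the extended directive, as it might appear in an application definition's class body (the variable and group names are illustrative, not taken from a real Ramble application):

```python
# Hypothetical application definition snippet; "all_workloads" and the
# OMP_NUM_THREADS choice are illustrative only.
environment_variable(
    "OMP_NUM_THREADS",
    value="{n_threads}",
    description="Number of OpenMP threads per rank",
    workload_group="all_workloads",
)
```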
4 changes: 3 additions & 1 deletion lib/ramble/ramble/language/modifier_language.py
@@ -256,7 +256,7 @@ def _env_var_modification(mod):


@modifier_directive("required_vars")
def required_variable(var: str, results_level="variable", modes=None):
def required_variable(var: str, results_level="variable", modes=None, description=None):
"""Mark a variable as being required by this modifier
Args:
@@ -265,6 +265,7 @@ def required_variable(var: str, results_level="variable", modes=None):
a key within JSON or YAML formatted results.
modes (list[str] | None): modes that the required check should be applied. The
default None means apply to all modes.
description (str | None): Description of the required variable.
"""

def _mark_required_var(mod):
@@ -273,6 +274,7 @@ def _mark_required_var(mod):
"level": ramble.keywords.output_level.variable,
# Extra prop that's only used for filtering
"modes": set(modes) if modes is not None else None,
"description": description,
}

return _mark_required_var
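
A hypothetical use of the new description argument, as it might appear in a modifier definition's class body (the variable name and mode are illustrative only):

```python
# Hypothetical modifier definition snippet; "hostlist" and "standard" are
# illustrative, not from a real Ramble modifier.
required_variable(
    "hostlist",
    description="Path to a file listing the hosts the experiment runs on",
    modes=["standard"],
)
```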
2 changes: 1 addition & 1 deletion lib/ramble/ramble/modifier.py
@@ -27,7 +27,7 @@ class ModifierBase(metaclass=ModifierMeta):
_builtin_name = NS_SEPARATOR.join(("modifier_builtin", "{obj_name}", "{name}"))
_mod_prefix_builtin = f"modifier_builtin{NS_SEPARATOR}"
_language_classes = [ModifierMeta, SharedMeta]
_pipelines = ["analyze", "archive", "mirror", "setup", "pushtocache", "execute"]
_pipelines = ["analyze", "archive", "mirror", "setup", "pushtocache", "execute", "logs"]

modifier_class = "ModifierBase"

1 change: 1 addition & 0 deletions lib/ramble/ramble/package_manager.py
@@ -36,6 +36,7 @@ class PackageManagerBase(metaclass=PackageManagerMeta):
"pushdeployment",
"pushtocache",
"execute",
"logs",
]

_spec_groups = [