diff --git a/nebula/frontend/app.py b/nebula/frontend/app.py
index a34a9fa..6ac8e47 100755
--- a/nebula/frontend/app.py
+++ b/nebula/frontend/app.py
@@ -757,6 +757,9 @@ def stop_scenario(scenario_name):
     ScenarioManagement.stop_participants()
     ScenarioManagement.stop_blockchain()
     scenario_set_status_to_finished(scenario_name)
+    # Generate statistics for the scenario
+    path = Utils.check_path(settings.log_dir, scenario_name)
+    ScenarioManagement.generate_statistics(path)
 
 
 def stop_all_scenarios():
diff --git a/nebula/scenarios.py b/nebula/scenarios.py
index b979c9d..b3829fb 100644
--- a/nebula/scenarios.py
+++ b/nebula/scenarios.py
@@ -12,6 +12,7 @@
 from datetime import datetime
 
 import docker
+import tensorboard_reducer as tbr
 
 from nebula.addons.blockchain.blockchain_deployer import BlockchainDeployer
 from nebula.addons.topologymanager import TopologyManager
@@ -1034,3 +1035,51 @@ def scenario_finished(self, timeout_seconds):
                 return False
 
             time.sleep(5)
+
+    @classmethod
+    def generate_statistics(cls, path):
+        try:
+            # Generate statistics
+            logging.info(f"Generating statistics for scenario {path}")
+
+            # Define input directories
+            input_event_dirs = sorted(glob.glob(os.path.join(path, "metrics/*")))
+            # Where to write reduced TB events
+            tb_events_output_dir = os.path.join(path, "metrics", "reduced-data")
+            csv_out_path = os.path.join(path, "metrics", "reduced-data-as.csv")
+            # Whether to abort or overwrite when csv_out_path already exists
+            overwrite = False
+            reduce_ops = ("mean", "min", "max", "median", "std", "var")
+
+            # Handle duplicate steps
+            handle_dup_steps = "keep-first"
+            # Strict steps
+            strict_steps = False
+
+            events_dict = tbr.load_tb_events(
+                input_event_dirs, handle_dup_steps=handle_dup_steps, strict_steps=strict_steps
+            )
+
+            # Number of recorded tags. e.g. would be 3 if you recorded loss, MAE and R^2
+            n_scalars = len(events_dict)
+            n_steps, n_events = list(events_dict.values())[0].shape
+
+            logging.info(f"Loaded {n_events} TensorBoard runs with {n_scalars} scalars and {n_steps} steps each")
+            logging.info(f"Events dict keys: {events_dict.keys()}")
+
+            reduced_events = tbr.reduce_events(events_dict, reduce_ops)
+
+            for op in reduce_ops:
+                logging.info(f"Writing '{op}' reduction to '{tb_events_output_dir}-{op}'")
+
+            tbr.write_tb_events(reduced_events, tb_events_output_dir, overwrite)
+
+            logging.info(f"Writing results to '{csv_out_path}'")
+
+            tbr.write_data_file(reduced_events, csv_out_path, overwrite)
+
+            logging.info("Reduction complete")
+
+        except Exception as e:
+            logging.exception(f"Error generating statistics: {e}")
+            return False
diff --git a/pyproject.toml b/pyproject.toml
index 2f0ac0b..fa65870 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -124,6 +124,8 @@ frontend = [
    "setuptools==74.1.2",
    "tensorboard==2.17.1",
    "tensorboardx==2.6.2.2",
+    "tensorboard-reducer==0.3.1",
+    "torch==2.4.1",
    "uvicorn==0.30.6",
    "web3==6.20.0",
    "wheel==0.44.0",