train_tune bugfix
loss experiments had bad log name
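
The bug: in the loss-experiment loop, hp_summary was built from three f-string fragments written as separate statements, so only the first fragment was actually assigned and the TRIP_/CNTR_ parts of the log name were silently dropped. A minimal illustration of the pitfall and the fix (variable values below are made up for the example):

ce_dims, ce_weight = 128, 1.0   # hypothetical values, for illustration only

# Buggy pattern: the assignment ends after the first line; the next f-string
# is a standalone expression statement whose result is discarded.
hp_summary = f"CE_dims{ce_dims}_wt{ce_weight}_"
f"TRIP_marg0.3_wt0.1_"
print(hp_summary)   # -> CE_dims128_wt1.0_

# Fixed pattern (as in this commit): parenthesize and concatenate the fragments
# so the whole name is assigned.
hp_summary = (
    f"CE_dims{ce_dims}_wt{ce_weight}_"
    + f"TRIP_marg0.3_wt0.1_"
)
print(hp_summary)   # -> CE_dims128_wt1.0_TRIP_marg0.3_wt0.1_

Inside parentheses, adjacent string literals would also concatenate implicitly; the commit uses an explicit + for clarity.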
alanngnet committed Jun 27, 2024
1 parent c328c99 commit fef3eed
Showing 1 changed file with 23 additions and 11 deletions.
34 changes: 23 additions & 11 deletions tools/train_tune.py
@@ -82,11 +82,11 @@ def run_experiment(
t.configure_scheduler()
t.train(max_epochs=hp["max_epochs"])
# ensure log files are saved before retrieving metrics
t.summary_writer.close()
t.summary_writer.close()
del t.model
del t
print(f"Completed experiment with seed {seed}")
time.sleep(1) # give OS time to save log file
time.sleep(1) # give OS time to save log file
return log_path


@@ -208,7 +208,9 @@ def get_final_metrics_from_logs(log_dir, test_name):
for seed in seeds:
hp_summary = f"m_per_class{m_per_class}"
log_path = run_experiment(hp_summary, checkpoint_dir, hp, seed)
final_val_loss, final_map = get_final_metrics_from_logs(log_path, test_name)
final_val_loss, final_map = get_final_metrics_from_logs(
log_path, test_name
)
results["val_loss"].append(final_val_loss)
results["map"].append(final_map)
mean_loss = np.mean(results["val_loss"])
@@ -248,7 +250,9 @@ def get_final_metrics_from_logs(log_dir, test_name):
)
)
log_path = run_experiment(hp_summary, checkpoint_dir, hp, seed)
final_val_loss, final_map = get_final_metrics_from_logs(log_path, test_name)
final_val_loss, final_map = get_final_metrics_from_logs(
log_path, test_name
)
results["val_loss"].append(final_val_loss)
results["map"].append(final_map)
mean_loss = np.mean(results["val_loss"])
@@ -261,7 +265,7 @@ def get_final_metrics_from_logs(log_dir, test_name):
}
print(f"Results for {hp_summary}")
pprint.pprint(all_results[hp_summary])

# loss experiments
hp = load_hparams(os.path.join(model_dir, "config/hparams.yaml"))
hp["every_n_epoch_to_save"] = 100
@@ -280,11 +284,15 @@ def get_final_metrics_from_logs(log_dir, test_name):
center_weight = center["weight"]

for seed in seeds:
hp_summary = f"CE_dims{ce_dims}_wt{ce_weight}_gamma{ce_gamma}_"
f"TRIP_marg{triplet_margin}_wt{triplet_weight}_"
f"CNTR_wt{center_weight}"
hp_summary = (
f"CE_dims{ce_dims}_wt{ce_weight}_gamma{ce_gamma}_"
+ f"TRIP_marg{triplet_margin}_wt{triplet_weight}_"
+ f"CNTR_wt{center_weight}"
)
log_path = run_experiment(hp_summary, checkpoint_dir, hp, seed)
final_val_loss, final_map = get_final_metrics_from_logs(log_path, test_name)
final_val_loss, final_map = get_final_metrics_from_logs(
log_path, test_name
)
results["val_loss"].append(final_val_loss)
results["map"].append(final_map)
mean_loss = np.mean(results["val_loss"])
@@ -301,5 +309,9 @@ def get_final_metrics_from_logs(log_dir, test_name):
print("\nSummary of Experiments:")
for hp_summary, result in all_results.items():
print(f"\nExperiment: {hp_summary}")
print(f" Validation Loss: mean = {result['val_loss']['mean']:.4f}, std = {result['val_loss']['std']:.4f}")
print(f" mAP: mean = {result['map']['mean']:.4f}, std = {result['map']['std']:.4f}")
print(
f" Validation Loss: mean = {result['val_loss']['mean']:.4f}, std = {result['val_loss']['std']:.4f}"
)
print(
f" mAP: mean = {result['map']['mean']:.4f}, std = {result['map']['std']:.4f}"
)
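
For context on the surrounding code: run_experiment closes the SummaryWriter and waits briefly so the TensorBoard event file is flushed to disk before get_final_metrics_from_logs reads it. That reader is not shown in this diff; a rough sketch of how the final scalar values could be pulled from an event log with TensorBoard's EventAccumulator, assuming hypothetical tag names "val_loss" and "mAP", is:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

def read_final_scalars(log_dir, loss_tag="val_loss", map_tag="mAP"):
    # Parse the event files written by SummaryWriter and return the last
    # logged value for each tag. Tag names here are assumptions, not the
    # repository's actual names.
    acc = EventAccumulator(log_dir)
    acc.Reload()
    final_loss = acc.Scalars(loss_tag)[-1].value
    final_map = acc.Scalars(map_tag)[-1].value
    return final_loss, final_map

The close-and-sleep step in run_experiment exists because such a reader can only see events that have already been written to disk.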
