Adapted website generation for Cloud and Parallel tracks.
mbromber committed Sep 10, 2024
1 parent f3a200d commit 0daaaf7
Showing 6 changed files with 53 additions and 29 deletions.
Binary file added data/results-cloud-2024.json.gz
Binary file added data/results-parallel-2024.json.gz
30 changes: 25 additions & 5 deletions smtcomp/generate_website_page.py
@@ -242,6 +242,13 @@ def get_winner(l: List[dict[str, str]] | None) -> str:
division = defs.Logic.name_of_int(d["logic"])
logics = dict()

if (track == defs.Track.Cloud) | (track == defs.Track.Parallel):
winner_seq = "-"
steps_seq = []
else:
winner_seq = get_winner(d[smtcomp.scoring.Kind.seq.name])
steps_seq = podium_steps(d[smtcomp.scoring.Kind.seq.name])

return PodiumDivision(
resultdate="2024-07-08",
year=config.current_year,
@@ -254,12 +261,12 @@ def get_winner(l: List[dict[str, str]] | None) -> str:
time_limit=config.timelimit_s,
mem_limit=config.memlimit_M,
logics=dict(sorted(logics.items())),
winner_seq=get_winner(d[smtcomp.scoring.Kind.seq.name]),
winner_seq=winner_seq,
winner_par=get_winner(d[smtcomp.scoring.Kind.par.name]),
winner_sat=get_winner(d[smtcomp.scoring.Kind.sat.name]),
winner_unsat=get_winner(d[smtcomp.scoring.Kind.unsat.name]),
winner_24s=get_winner(d[smtcomp.scoring.Kind.twentyfour.name]),
sequential=podium_steps(d[smtcomp.scoring.Kind.seq.name]),
sequential=steps_seq,
parallel=podium_steps(d[smtcomp.scoring.Kind.par.name]),
sat=podium_steps(d[smtcomp.scoring.Kind.sat.name]),
unsat=podium_steps(d[smtcomp.scoring.Kind.unsat.name]),
@@ -424,13 +431,19 @@ def get_winner(l: List[PodiumStepBiggestLead] | None) -> str:
unsat = biggest_lead_ranking_for_kind(data, smtcomp.scoring.Kind.unsat)
twentyfour = biggest_lead_ranking_for_kind(data, smtcomp.scoring.Kind.twentyfour)

if (track == defs.Track.Cloud) | (track == defs.Track.Parallel):
winner_seq = "-"
sequential = []
else:
winner_seq = get_winner(sequential)

return PodiumBiggestLead(
resultdate="2024-07-08",
year=config.current_year,
track=track,
results=f"results_{config.current_year}",
participants=f"participants_{config.current_year}",
winner_seq=get_winner(sequential),
winner_seq=winner_seq,
winner_par=get_winner(parallel),
winner_sat=get_winner(sat),
winner_unsat=get_winner(unsat),
@@ -506,18 +519,25 @@ def timeScore(vws_step: PodiumStep) -> float:
for k in smtcomp.scoring.Kind
)

if (track == defs.Track.Cloud) | (track == defs.Track.Parallel):
winner_seq = "-"
steps_seq = []
else:
winner_seq = get_winner(ld[smtcomp.scoring.Kind.seq])
steps_seq = ld[smtcomp.scoring.Kind.seq]

return PodiumLargestContribution(
resultdate="2024-07-08",
year=config.current_year,
track=track,
results=f"results_{config.current_year}",
participants=f"participants_{config.current_year}",
winner_seq=get_winner(ld[smtcomp.scoring.Kind.seq]),
winner_seq=winner_seq,
winner_par=get_winner(ld[smtcomp.scoring.Kind.par]),
winner_sat=get_winner(ld[smtcomp.scoring.Kind.sat]),
winner_unsat=get_winner(ld[smtcomp.scoring.Kind.unsat]),
winner_24s=get_winner(ld[smtcomp.scoring.Kind.twentyfour]),
sequential=ld[smtcomp.scoring.Kind.seq],
sequential=steps_seq,
parallel=ld[smtcomp.scoring.Kind.par],
sat=ld[smtcomp.scoring.Kind.sat],
unsat=ld[smtcomp.scoring.Kind.unsat],
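Editorial note on the generate_website_page.py hunks above: all three if/else additions apply the same guard. For the Cloud and Parallel tracks there is no sequential ranking (presumably because only wall-clock data is available for the AWS-based runs, as the results.py change below suggests), so the sequential winner is reported as "-" and the sequential podium list is left empty. A minimal self-contained sketch of that pattern, with the Track enum and podium data reduced to stand-ins (everything below is illustrative, not the repository's actual API):

from enum import Enum, auto

class Track(Enum):
    SingleQuery = auto()
    Cloud = auto()
    Parallel = auto()

def sequential_podium(track: Track, seq_ranking: list[str]) -> tuple[str, list[str]]:
    # Cloud/Parallel runs have no meaningful sequential (CPU-time based)
    # ranking, so blank it out instead of computing a winner.
    if track in (Track.Cloud, Track.Parallel):
        return "-", []
    winner = seq_ranking[0] if seq_ranking else "-"
    return winner, seq_ranking

print(sequential_podium(Track.Cloud, ["solverA", "solverB"]))        # ('-', [])
print(sequential_podium(Track.SingleQuery, ["solverA", "solverB"]))  # ('solverA', ['solverA', 'solverB'])

The diff spells the guard as (track == defs.Track.Cloud) | (track == defs.Track.Parallel); with plain booleans the bitwise | gives the same result as or, just without short-circuiting.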
46 changes: 25 additions & 21 deletions smtcomp/results.py
@@ -431,7 +431,7 @@ def parse_dir(dir: Path) -> pl.LazyFrame:
).drop("sat", "unsat", "unsat_core")
else:
results = results.with_columns(validation_attempted=False)

return results


@@ -467,7 +467,7 @@ def helper_get_results(
else:
lf = pl.concat(pl.read_ipc(p / "parsed.feather").lazy() for p in results)
lf = lf.filter(track=int(track)).drop("scramble_id")

selection = smtcomp.selection.helper(config, track).filter(selected=True).with_columns(track=int(track))

selection = selection.unique()
@@ -477,9 +477,9 @@
.collect() # Improve later works
.lazy()
)

selected = intersect(selection, smtcomp.selection.solver_competing_logics(config), on=["logic", "track"])

selected = add_columns(
selected,
lf.drop("logic", "participation"), # Hack for participation 0 bug move "participation" to on= for 2025
@@ -492,7 +492,7 @@
"nb_answers": 0,
},
)

return selected, selection


@@ -505,10 +505,13 @@ def parse_aws_csv(dir: Path) -> pl.LazyFrame:
- if original_id is present it is used (all the other track)
- if it ends with "unsatcore" and the directory "../unsat_core_valisation_results" is present and converted (feather file) it is used to validate the unsat cores
"""

def aws_logic(logic: str) -> int:
return int(defs.Logic(logic))

def aws_track(track: str) -> int:
return int(defs.Track(track))

def aws_result(res: str) -> int:
match res:
case "unsat":
@@ -521,20 +524,21 @@ def aws_result(res: str) -> int:
csv = dir / "results.csv"
if not csv.exists():
raise (ValueError(f"results.csv missing in the directory"))
lf = pl.scan_csv(csv).select(pl.col("solver"),
pl.col("scramble_id"),
pl.col("logic").apply(aws_logic,return_dtype=pl.Int64).alias("logic"),
pl.col("solver_time").alias("walltime_s"),
pl.col("solver_time").alias("cputime_s"),
pl.col("file"),
pl.col("track").apply(aws_track,return_dtype=pl.Int32).alias("track"),
pl.col("solver_result").map_elements(aws_result,return_dtype=pl.Int64).alias("answer")
)

results = lf.with_columns((pl.col("logic") * 0).alias("participation"),
(pl.col("logic") * 0).alias("memory_B"),
(pl.col("logic") * 0).alias("nb_answers"))

print(results.collect())

lf = pl.scan_csv(csv).select(
pl.col("solver"),
pl.col("scramble_id"),
pl.col("logic").apply(aws_logic, return_dtype=pl.Int64).alias("logic"),
pl.col("solver_time").alias("walltime_s"),
pl.col("file"),
pl.col("track").apply(aws_track, return_dtype=pl.Int32).alias("track"),
pl.col("solver_result").map_elements(aws_result, return_dtype=pl.Int64).alias("answer"),
)

results = lf.with_columns(
(pl.col("logic") * 0).alias("participation"),
(pl.col("logic") * 0).alias("memory_B"),
(pl.col("logic") * 0).alias("nb_answers"),
(pl.col("logic") * 0).alias("cputime_s"),
)

return results
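Editorial note: the rewrite of parse_aws_csv above mainly reformats the query and moves cputime_s into the block of zero-filled placeholder columns, since the AWS runs only report a wall-clock solver_time. A rough standalone sketch of the resulting CSV-to-LazyFrame mapping, with toy dictionaries standing in for the real defs.Logic / defs.Track / answer encodings (the names and values below are assumptions for illustration, not the project's actual definitions):

import polars as pl

LOGICS = {"QF_BV": 0, "QF_LIA": 1}              # stand-in for defs.Logic
TRACKS = {"Cloud": 0, "Parallel": 1}            # stand-in for defs.Track
ANSWERS = {"sat": 1, "unsat": 2, "unknown": 0}  # stand-in for the answer codes

def parse_results_csv(csv_path: str) -> pl.LazyFrame:
    lf = pl.scan_csv(csv_path).select(
        pl.col("solver"),
        pl.col("scramble_id"),
        pl.col("logic").map_elements(LOGICS.get, return_dtype=pl.Int64).alias("logic"),
        pl.col("solver_time").alias("walltime_s"),
        pl.col("file"),
        pl.col("track").map_elements(TRACKS.get, return_dtype=pl.Int64).alias("track"),
        pl.col("solver_result").map_elements(ANSWERS.get, return_dtype=pl.Int64).alias("answer"),
    )
    # Columns the downstream scoring code expects but the AWS CSV cannot provide,
    # filled with zeros (including cputime_s, since only wall-clock time is measured).
    return lf.with_columns(
        pl.lit(0).alias("participation"),
        pl.lit(0).alias("memory_B"),
        pl.lit(0).alias("nb_answers"),
        pl.lit(0).alias("cputime_s"),
    )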
2 changes: 1 addition & 1 deletion smtcomp/scoring.py
@@ -110,7 +110,7 @@ def benchmark_scoring(results: pl.LazyFrame, track: defs.Track) -> pl.LazyFrame:
Requires disagreements
Add "error_score", "correctly_solved_score", "wallclock_time_score","cpu_time_score"
"""

wallclock_time_score = pl.when(known_answer).then(c_walltime_s).otherwise(0.0)
"""Time if answered"""
cpu_time_score = pl.when(known_answer).then(c_cputime_s).otherwise(0.0)
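Editorial note: the scoring.py hunk is whitespace-only, but the expressions it touches are central to the rankings being generated: a run's wall-clock or CPU time only contributes to its time score when the run produced a usable answer; otherwise the score is 0.0. A toy polars illustration of that when/then/otherwise pattern (the frame and column values below are invented for the example):

import polars as pl

runs = pl.DataFrame(
    {
        "solver": ["A", "B", "C"],
        "walltime_s": [3.2, 10.0, 1.1],
        "known_answer": [True, False, True],  # simplified stand-in for the real predicate
    }
)

scored = runs.with_columns(
    # Mirrors pl.when(known_answer).then(c_walltime_s).otherwise(0.0) in scoring.py.
    pl.when(pl.col("known_answer"))
    .then(pl.col("walltime_s"))
    .otherwise(0.0)
    .alias("wallclock_time_score")
)
print(scored)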
4 changes: 2 additions & 2 deletions smtcomp/selection.py
@@ -307,7 +307,7 @@ def sample(lf: pl.LazyFrame) -> pl.LazyFrame:
b = sample(b.group_by("track", "hard", "logic").agg(file=c_file.sort())).sort(by="logic")

b = b.group_by("track", "hard", maintain_order=True).agg(file=pl.col("file"))

def round_robbin(files: list[list[int]]) -> list[int]:
result: list[int] = []
while True:
@@ -320,7 +320,7 @@ def round_robbin(files: list[list[int]]) -> list[int]:
return result
if empty:
raise (ValueError("Not enough elements, decrease aws_timelimit_hard"))

d = b.collect().to_dict(as_series=False)
d["file"] = list(map(round_robbin, d["file"]))

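Editorial note: the selection.py hunks are also whitespace-only; the surrounding round_robbin helper is what feeds the Cloud/Parallel benchmark selection, interleaving the per-logic benchmark lists so that every logic contributes before any single list is drained, and raising the "Not enough elements" error above when the pool runs dry. The real stopping condition is not visible in this diff, so the sketch below takes an explicit target count (an assumption for illustration only):

from itertools import chain, zip_longest

def round_robin(files: list[list[int]], needed: int) -> list[int]:
    # Take the 1st element of every per-logic list, then the 2nd, and so on,
    # skipping exhausted lists, until `needed` benchmark ids are collected.
    interleaved = [x for x in chain.from_iterable(zip_longest(*files)) if x is not None]
    if len(interleaved) < needed:
        raise ValueError("Not enough elements, decrease aws_timelimit_hard")
    return interleaved[:needed]

# Hypothetical usage: three logics with 3, 1 and 2 selected benchmarks.
print(round_robin([[1, 2, 3], [10], [20, 21]], needed=5))  # [1, 10, 20, 2, 21]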
