diff --git a/data/results-cloud-2024.json.gz b/data/results-cloud-2024.json.gz
new file mode 100644
index 00000000..a51f0905
Binary files /dev/null and b/data/results-cloud-2024.json.gz differ
diff --git a/data/results-parallel-2024.json.gz b/data/results-parallel-2024.json.gz
new file mode 100644
index 00000000..d6c0e99d
Binary files /dev/null and b/data/results-parallel-2024.json.gz differ
diff --git a/smtcomp/generate_website_page.py b/smtcomp/generate_website_page.py
index 8961e578..fff44e6a 100644
--- a/smtcomp/generate_website_page.py
+++ b/smtcomp/generate_website_page.py
@@ -242,6 +242,13 @@ def get_winner(l: List[dict[str, str]] | None) -> str:
     division = defs.Logic.name_of_int(d["logic"])
     logics = dict()
 
+    if (track == defs.Track.Cloud) | (track == defs.Track.Parallel):
+        winner_seq = "-"
+        steps_seq = []
+    else:
+        winner_seq = get_winner(d[smtcomp.scoring.Kind.seq.name])
+        steps_seq = podium_steps(d[smtcomp.scoring.Kind.seq.name])
+
     return PodiumDivision(
         resultdate="2024-07-08",
         year=config.current_year,
@@ -254,12 +261,12 @@ def get_winner(l: List[dict[str, str]] | None) -> str:
         time_limit=config.timelimit_s,
         mem_limit=config.memlimit_M,
         logics=dict(sorted(logics.items())),
-        winner_seq=get_winner(d[smtcomp.scoring.Kind.seq.name]),
+        winner_seq=winner_seq,
         winner_par=get_winner(d[smtcomp.scoring.Kind.par.name]),
         winner_sat=get_winner(d[smtcomp.scoring.Kind.sat.name]),
         winner_unsat=get_winner(d[smtcomp.scoring.Kind.unsat.name]),
         winner_24s=get_winner(d[smtcomp.scoring.Kind.twentyfour.name]),
-        sequential=podium_steps(d[smtcomp.scoring.Kind.seq.name]),
+        sequential=steps_seq,
         parallel=podium_steps(d[smtcomp.scoring.Kind.par.name]),
         sat=podium_steps(d[smtcomp.scoring.Kind.sat.name]),
         unsat=podium_steps(d[smtcomp.scoring.Kind.unsat.name]),
@@ -424,13 +431,19 @@ def get_winner(l: List[PodiumStepBiggestLead] | None) -> str:
     unsat = biggest_lead_ranking_for_kind(data, smtcomp.scoring.Kind.unsat)
     twentyfour = biggest_lead_ranking_for_kind(data, smtcomp.scoring.Kind.twentyfour)
 
+    if (track == defs.Track.Cloud) | (track == defs.Track.Parallel):
+        winner_seq = "-"
+        sequential = []
+    else:
+        winner_seq = get_winner(sequential)
+
     return PodiumBiggestLead(
         resultdate="2024-07-08",
         year=config.current_year,
         track=track,
         results=f"results_{config.current_year}",
         participants=f"participants_{config.current_year}",
-        winner_seq=get_winner(sequential),
+        winner_seq=winner_seq,
         winner_par=get_winner(parallel),
         winner_sat=get_winner(sat),
         winner_unsat=get_winner(unsat),
@@ -506,18 +519,25 @@ def timeScore(vws_step: PodiumStep) -> float:
         for k in smtcomp.scoring.Kind
     )
 
+    if (track == defs.Track.Cloud) | (track == defs.Track.Parallel):
+        winner_seq = "-"
+        steps_seq = []
+    else:
+        winner_seq = get_winner(ld[smtcomp.scoring.Kind.seq])
+        steps_seq = ld[smtcomp.scoring.Kind.seq]
+
     return PodiumLargestContribution(
         resultdate="2024-07-08",
         year=config.current_year,
         track=track,
         results=f"results_{config.current_year}",
         participants=f"participants_{config.current_year}",
-        winner_seq=get_winner(ld[smtcomp.scoring.Kind.seq]),
+        winner_seq=winner_seq,
         winner_par=get_winner(ld[smtcomp.scoring.Kind.par]),
         winner_sat=get_winner(ld[smtcomp.scoring.Kind.sat]),
         winner_unsat=get_winner(ld[smtcomp.scoring.Kind.unsat]),
         winner_24s=get_winner(ld[smtcomp.scoring.Kind.twentyfour]),
-        sequential=ld[smtcomp.scoring.Kind.seq],
+        sequential=steps_seq,
         parallel=ld[smtcomp.scoring.Kind.par],
         sat=ld[smtcomp.scoring.Kind.sat],
         unsat=ld[smtcomp.scoring.Kind.unsat],
diff --git a/smtcomp/results.py b/smtcomp/results.py
index 6e4c284a..c2a2e7c9 100644
--- a/smtcomp/results.py
+++ b/smtcomp/results.py
@@ -431,7 +431,7 @@ def parse_dir(dir: Path) -> pl.LazyFrame:
         ).drop("sat", "unsat", "unsat_core")
     else:
         results = results.with_columns(validation_attempted=False)
-
+
     return results
@@ -467,7 +467,7 @@ def helper_get_results(
     else:
         lf = pl.concat(pl.read_ipc(p / "parsed.feather").lazy() for p in results)
     lf = lf.filter(track=int(track)).drop("scramble_id")
-
+
     selection = smtcomp.selection.helper(config, track).filter(selected=True).with_columns(track=int(track))
     selection = selection.unique()
@@ -477,9 +477,9 @@ def helper_get_results(
         .collect()  # Improve later works
         .lazy()
     )
-
+
     selected = intersect(selection, smtcomp.selection.solver_competing_logics(config), on=["logic", "track"])
-
+
     selected = add_columns(
         selected,
         lf.drop("logic", "participation"),  # Hack for participation 0 bug move "participation" to on= for 2025
@@ -492,7 +492,7 @@ def helper_get_results(
             "nb_answers": 0,
         },
     )
-
+
     return selected, selection
@@ -505,10 +505,13 @@ def parse_aws_csv(dir: Path) -> pl.LazyFrame:
     - if original_id is present it is used (all the other track)
     - if it ends with "unsatcore" and the directory "../unsat_core_valisation_results" is present and converted (feather file) it is used to validate the unsat cores
     """
+
     def aws_logic(logic: str) -> int:
         return int(defs.Logic(logic))
+
     def aws_track(track: str) -> int:
         return int(defs.Track(track))
+
     def aws_result(res: str) -> int:
         match res:
             case "unsat":
@@ -521,20 +524,21 @@ def aws_result(res: str) -> int:
     csv = dir / "results.csv"
     if not csv.exists():
         raise (ValueError(f"results.csv missing in the directory"))
-    lf = pl.scan_csv(csv).select(pl.col("solver"),
-                                 pl.col("scramble_id"),
-                                 pl.col("logic").apply(aws_logic,return_dtype=pl.Int64).alias("logic"),
-                                 pl.col("solver_time").alias("walltime_s"),
-                                 pl.col("solver_time").alias("cputime_s"),
-                                 pl.col("file"),
-                                 pl.col("track").apply(aws_track,return_dtype=pl.Int32).alias("track"),
-                                 pl.col("solver_result").map_elements(aws_result,return_dtype=pl.Int64).alias("answer")
-                                 )
-
-    results = lf.with_columns((pl.col("logic") * 0).alias("participation"),
-                              (pl.col("logic") * 0).alias("memory_B"),
-                              (pl.col("logic") * 0).alias("nb_answers"))
-
-    print(results.collect())
-
+    lf = pl.scan_csv(csv).select(
+        pl.col("solver"),
+        pl.col("scramble_id"),
+        pl.col("logic").apply(aws_logic, return_dtype=pl.Int64).alias("logic"),
+        pl.col("solver_time").alias("walltime_s"),
+        pl.col("file"),
+        pl.col("track").apply(aws_track, return_dtype=pl.Int32).alias("track"),
+        pl.col("solver_result").map_elements(aws_result, return_dtype=pl.Int64).alias("answer"),
+    )
+
+    results = lf.with_columns(
+        (pl.col("logic") * 0).alias("participation"),
+        (pl.col("logic") * 0).alias("memory_B"),
+        (pl.col("logic") * 0).alias("nb_answers"),
+        (pl.col("logic") * 0).alias("cputime_s"),
+    )
+
     return results
diff --git a/smtcomp/scoring.py b/smtcomp/scoring.py
index fb2c90ca..5b1dd8ba 100644
--- a/smtcomp/scoring.py
+++ b/smtcomp/scoring.py
@@ -110,7 +110,7 @@ def benchmark_scoring(results: pl.LazyFrame, track: defs.Track) -> pl.LazyFrame:
     Requires disagreements
     Add "error_score", "correctly_solved_score", "wallclock_time_score","cpu_time_score"
     """
-
+
     wallclock_time_score = pl.when(known_answer).then(c_walltime_s).otherwise(0.0)
     """Time if answered"""
     cpu_time_score = pl.when(known_answer).then(c_cputime_s).otherwise(0.0)
diff --git a/smtcomp/selection.py b/smtcomp/selection.py
index 486ae334..252f439a 100644
--- a/smtcomp/selection.py
+++ b/smtcomp/selection.py
@@ -307,7 +307,7 @@ def sample(lf: pl.LazyFrame) -> pl.LazyFrame:
     b = sample(b.group_by("track", "hard", "logic").agg(file=c_file.sort())).sort(by="logic")
     b = b.group_by("track", "hard", maintain_order=True).agg(file=pl.col("file"))
-
+
     def round_robbin(files: list[list[int]]) -> list[int]:
         result: list[int] = []
         while True:
@@ -320,7 +320,7 @@ def round_robbin(files: list[list[int]]) -> list[int]:
                     return result
         if empty:
             raise (ValueError("Not enough elements, decrease aws_timelimit_hard"))
-
+
     d = b.collect().to_dict(as_series=False)
     d["file"] = list(map(round_robbin, d["file"]))