Merge branch 'master' into yices2
ahmed-irfan authored May 29, 2024
2 parents 4d5c7eb + 300b8a5 commit abed841
Showing 15 changed files with 409 additions and 70 deletions.
13 changes: 13 additions & 0 deletions .github/workflows/deploy-website.yaml
@@ -44,6 +44,19 @@ jobs:
        with:
          submodules: recursive
          fetch-depth: 0
      # For smtcomp script
      - uses: actions/cache@v3
        with:
          path: ~/.cache/pre-commit
          key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
      - name: Set up the environment
        uses: ./.github/actions/setup-poetry-env
      # Generate files using smtcomp tool
      - name: Files generation
        run: |
          pip install json-schema-for-humans
          poetry run make submission-doc
      ###
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v4
58 changes: 58 additions & 0 deletions .github/workflows/test-solver.yml
@@ -0,0 +1,58 @@
name: TestSolver

on:
  pull_request:
    paths:
      - submissions/*.json

jobs:
  generate-test-script:
    runs-on: ubuntu-latest
    steps:
      - name: Check out
        uses: actions/checkout@v3

      - uses: actions/cache@v3
        with:
          path: ~/.cache/pre-commit
          key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}

      - name: Set up the environment
        uses: ./.github/actions/setup-poetry-env

      - name: Compute changed files
        id: changed-files
        uses: tj-actions/changed-files@v41
        with:
          files: |
            submissions/*.json
      - name: Generate script
        run: |
          poetry run smtcomp generate-test-script test_data/ ${{ steps.changed-files.outputs.all_changed_files }}
          rm -rf test_data/download
      - name: Archive directory
        run: tar -cf test_data.tar test_data

      - name: Upload generated test script
        uses: actions/upload-artifact@v4
        with:
          name: generated_script
          path: test_data.tar

  run-test-script:
    needs: generate-test-script
    runs-on: ubuntu-latest
    container: registry.gitlab.com/sosy-lab/benchmarking/competition-scripts/user:latest
    steps:
      - name: Download generated test script
        uses: actions/download-artifact@v4
        with:
          name: generated_script

      - name: Unarchive
        run: tar -xf test_data.tar

      - name: Run test script
        run: python3 test_data/test_script.py
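For local debugging, the two jobs above can be approximated outside CI. The following is a hedged sketch, not part of the repository: it assumes the poetry environment and the smtcomp CLI are installed, and the submission file name is only a placeholder standing in for the changed-files output.

# Local approximation of the TestSolver workflow (sketch, assumptions noted above).
import subprocess

changed = ["submissions/ExampleSolver.json"]  # placeholder for the changed submission(s)

# generate-test-script job
subprocess.run(
    ["poetry", "run", "smtcomp", "generate-test-script", "test_data/", *changed],
    check=True,
)

# run-test-script job (in CI this runs inside the competition container image)
subprocess.run(["python3", "test_data/test_script.py"], check=True)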
2 changes: 1 addition & 1 deletion .gitignore
@@ -166,7 +166,7 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

/web/content/solver_submission/schema.json
/web/content/solver_submission/schema.*
schema_doc.css
schema_doc.min.js
*.feather
2 changes: 1 addition & 1 deletion Makefile
@@ -48,7 +48,7 @@ submission-doc:
	@poetry run smtcomp dump-json-schema $(GENERATED_SCHEMA_FILE)
	@echo "🚀 Generating html doc to $(GENERATED_SCHEMA_HTML)"
	@echo " Needs 'pip install json-schema-for-humans'"
	generate-schema-doc --expand-buttons --no-link-to-reused-ref $(GENERATED_SCHEMA_FILE) $(GENERATED_SCHEMA_HTML)
	@poetry run generate-schema-doc --expand-buttons --no-link-to-reused-ref $(GENERATED_SCHEMA_FILE) $(GENERATED_SCHEMA_HTML)

hugo-server:
	(cd web; hugo server)
85 changes: 80 additions & 5 deletions smtcomp/defs.py
@@ -5,7 +5,7 @@
import re
from enum import Enum
from pathlib import Path, PurePath
from typing import Any, Dict, Optional
from typing import Any, Dict, cast, Optional

from pydantic import BaseModel, Field, RootModel, model_validator, ConfigDict
from pydantic.networks import HttpUrl, validate_email
@@ -20,6 +20,21 @@ class Config:
    cpuCores = 4
    min_used_benchmarks = 300
    ratio_of_used_benchmarks = 0.5
    old_criteria = False
    """Do we try to emulate the <= 2023 criteria, which do not really follow the rules"""
    invert_triviality = False
    """Prioritize triviality as much as possible for testing purposes.
    Look for simple problems instead of hard ones"""

    def __init__(self: Config, seed: int | None) -> None:
        self.__seed = seed

    def seed(self) -> int:
        if self.__seed is None:
            raise ValueError("The seed has not been set")
        s = self.__seed
        self.__seed += 1
        return s
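A small usage sketch of the seeding behaviour shown above (assuming the increment as written): each call hands out the current seed and then advances it, so successive randomized selections stay reproducible yet distinct.

# Illustrative only; Config is the class defined above.
config = Config(seed=17)
first = config.seed()   # 17
second = config.seed()  # 18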


class DataFiles:
@@ -1056,6 +1071,10 @@ def logic_used_for_track(t: Track) -> set[Logic]:


class Logics(RootModel):
    """
    Can be a list of logics or a regexp matched against all the existing logics
    """

    root: list[Logic]

    @model_validator(mode="before")
@@ -1080,6 +1099,14 @@ def logics_from_regexp(cls, data: str) -> list[Logic]:
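Based on the docstring and the mode="before" validator, a Logics value can apparently be built from either form. A hedged sketch, assuming pydantic v2's model_validate and that a plain string is expanded against the known logic names as the docstring suggests:

# Both spellings are assumptions drawn from the docstring above, not from tests.
explicit = Logics.model_validate(["QF_UF", "QF_BV"])  # explicit list of logics
from_pattern = Logics.model_validate("QF_.*")         # regexp expanded by logics_from_regexp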


class Archive(BaseModel):
    """
    The url must be a record from http://zenodo.org for the final submission, so
    the hash is not required because zenodo records are immutable.
    The hash can be used if you want to be sure of the archive used during the
    test runs.
    """

    url: HttpUrl
    h: Hash | None = None
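A sketch of the two situations the docstring describes; the record id and file names below are placeholders, not a real submission:

# Final submission: a Zenodo record, no hash needed (records are immutable).
final = Archive(url="https://zenodo.org/records/1234567/files/solver.tar.gz")

# Test runs: any URL, optionally pinned with the h field (shape of Hash not shown here).
trial = Archive(url="https://example.org/nightly/solver.tar.gz")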

@@ -1132,14 +1159,43 @@ def uniq_id(self, name: str, archive: Archive) -> str:
        return h.hexdigest()


class ParticipationCompleted(BaseModel, extra="forbid"):
    """Participation using the default values from the submission root"""

    tracks: dict[Track, dict[Division, set[Logic]]]
    archive: Archive
    command: Command
    experimental: bool


class Participation(BaseModel, extra="forbid"):
    """
    tracks: select the participation tracks
    divisions: add all the logics of those divisions in each track
    logics: add all the specified logics to each selected track in which they exist
    aws_dockerfile should be used only in conjunction with the Cloud and Parallel tracks
    archive and command should not be used with the Cloud and Parallel tracks. They supersede the ones given at the root.
    """

    tracks: list[Track]
    logics: Logics = Logics(root=[])
    divisions: list[Division] = []
    archive: Archive | None = None
    command: Command | None = None
    aws_dockerfile: str | None = None
    experimental: bool = False

    @model_validator(mode="after")
    def check_archive(self) -> Participation:
        aws_track = {Track.Cloud, Track.Parallel}
        if self.aws_dockerfile is not None and not set(self.tracks).issubset(aws_track):
            raise ValueError("aws_dockerfile can be used only with the Cloud and Parallel tracks")
        if (self.archive is not None or self.command is not None) and not set(self.tracks).isdisjoint(aws_track):
            raise ValueError("archive and command fields can't be used with the Cloud and Parallel tracks")
        return self
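The validator above is easiest to read next to a concrete value. A hedged example: the Division name and the "QF_UF" logic string are assumed to exist in this module and are for illustration only.

# Passes check_archive: no aws_dockerfile, and archive/command are left to the
# submission root since the tracks are not Cloud/Parallel.
example = Participation(
    tracks=[Track.SingleQuery, Track.ModelValidation],
    divisions=[Division.QF_Equality],         # assumed division name
    logics=Logics.model_validate(["QF_UF"]),  # assumed logic name
)

# Would be rejected by check_archive: aws_dockerfile outside Cloud/Parallel.
# Participation(tracks=[Track.SingleQuery], aws_dockerfile="Dockerfile")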

    def get(self, d: None | dict[Track, dict[Division, set[Logic]]] = None) -> dict[Track, dict[Division, set[Logic]]]:
        if d is None:
            d = {}
@@ -1155,17 +1211,31 @@ def get(self, d: None | dict[Track, dict[Division, set[Logic]]] = None) -> dict[
                logics.add(logic)
        return d

    def complete(self, archive: Archive | None, command: Command | None) -> ParticipationCompleted:
        archive = cast(Archive, archive if self.archive is None else self.archive)
        command = cast(Command, command if self.command is None else self.command)
        return ParticipationCompleted(
            tracks=self.get(), archive=archive, command=command, experimental=self.experimental
        )


import itertools


class Participations(RootModel):
    root: list[Participation]

    def get_divisions(self, track: Track) -> list[Division]:
    def get_divisions(self, l: list[Track] = list(Track)) -> set[Division]:
        """Return the divisions in which the solver participates"""
        return [] # TODO
        tracks = self.get()
        divs = [set(tracks[track].keys()) for track in l]
        return functools.reduce(lambda x, y: x | y, divs)

    def get_logics(self, track: Track) -> list[Logic]:
    def get_logics(self, l: list[Track] = list(Track)) -> set[Logic]:
        """Return the logics in which the solver participates"""
        return [] # TODO
        tracks = self.get()
        logics = itertools.chain.from_iterable([iter(tracks[track].values()) for track in l])
        return functools.reduce(lambda x, y: x | y, logics)
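With the new signatures above, callers can ask for the union over any subset of tracks. A sketch, assuming a populated Participations value named parts:

all_divisions = parts.get_divisions()              # union over every track (the default)
sq_logics = parts.get_logics([Track.SingleQuery])  # logics entered in one track only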

    def get(self, d: None | dict[Track, dict[Division, set[Logic]]] = None) -> dict[Track, dict[Division, set[Logic]]]:
        if d is None:
@@ -1187,6 +1257,7 @@ class Submission(BaseModel, extra="forbid"):
    system_description: HttpUrl
    solver_type: SolverType
    participations: Participations
    seed: int | None = None

    @model_validator(mode="after")
    def check_archive(self) -> Submission:
@@ -1199,6 +1270,10 @@ def check_archive(self) -> Submission:
    def uniq_id(self) -> str:
        return hashlib.sha256(self.name.encode()).hexdigest()

    def complete_participations(self) -> list[ParticipationCompleted]:
        """Push defaults from the submission into participations"""
        return [p.complete(self.archive, self.command) for p in self.participations.root]


class Smt2File(BaseModel):
    incremental: bool
23 changes: 21 additions & 2 deletions smtcomp/generate_benchmarks.py
@@ -3,6 +3,25 @@
from smtcomp import defs


def path_trivial_benchmark(dst: Path, track: defs.Track, logic: defs.Logic, status: defs.Status) -> Path:
    match track:
        case defs.Track.Incremental:
            suffix = "_inc"
        case defs.Track.ModelValidation:
            suffix = "_model"
        case defs.Track.SingleQuery:
            suffix = ""
        case defs.Track.UnsatCore | defs.Track.ProofExhibition | defs.Track.Cloud | defs.Track.Parallel:
            raise ValueError(f"No trivial benchmarks yet for {track}")
    match status:
        case defs.Status.Sat:
            return dst.joinpath("files", str(logic) + suffix + ".sat.smt2")
        case defs.Status.Unsat:
            return dst.joinpath("files", str(logic) + suffix + ".unsat.smt2")
        case defs.Status.Unknown:
            raise ValueError("No trivial benchmarks yet for unknown")


def generate_trivial_benchmarks(dst: Path) -> None:
    dst.joinpath("files").mkdir(parents=True, exist_ok=True)
    for track, divisions in defs.tracks.items():
@@ -18,8 +37,8 @@ def generate_trivial_benchmarks(dst: Path) -> None:
        for _, theories in divisions.items():
            for theory in theories:
                file = dst.joinpath(str(theory) + suffix)
                file_sat = dst.joinpath("files", str(theory) + suffix + ".sat.smt2")
                file_unsat = dst.joinpath("files", str(theory) + suffix + ".unsat.smt2")
                file_sat = path_trivial_benchmark(dst, track, theory, defs.Status.Sat)
                file_unsat = path_trivial_benchmark(dst, track, theory, defs.Status.Unsat)

                file.write_text("\n".join([str(file_sat.relative_to(dst)), str(file_unsat.relative_to(dst))]))
