diff --git a/Pipfile b/Pipfile index 4c0589d..3ff50c0 100644 --- a/Pipfile +++ b/Pipfile @@ -15,6 +15,7 @@ black = "==24.3.0" [packages] fastapi = "==0.109.1" uvicorn = "==0.26.0" +pydantic-settings = "==2.2.1" [requires] python_version = "3.11" diff --git a/Pipfile.lock b/Pipfile.lock index f8ff88b..9f22ddf 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "c3b3aa94bfdb743216f1b93e0108d822d52d1738dbb641b236384e5e1d838bb6" + "sha256": "bb9a0007005e14f614cbcce60d7702a5f22564b960da2f98d76ebf3754047733" }, "pipfile-spec": 6, "requires": { @@ -166,6 +166,22 @@ "markers": "python_version >= '3.8'", "version": "==2.16.3" }, + "pydantic-settings": { + "hashes": [ + "sha256:00b9f6a5e95553590434c0fa01ead0b216c3e10bc54ae02e37f359948643c5ed", + "sha256:0235391d26db4d2190cb9b31051c4b46882d28a51533f97440867f012d4da091" + ], + "index": "pypi", + "version": "==2.2.1" + }, + "python-dotenv": { + "hashes": [ + "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", + "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a" + ], + "markers": "python_version >= '3.8'", + "version": "==1.0.1" + }, "sniffio": { "hashes": [ "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", diff --git a/README.md b/README.md index 6c70a89..3a07bae 100644 --- a/README.md +++ b/README.md @@ -56,8 +56,8 @@ You can expect 12 entries to be solved after ~1s with `bruteforce`and <0.1s with weaker machines. Multiple cores won't speed up job time, but will enable efficient solving of parallel jobs. -The thresholds that decide which jobs are solved which way are defined in constants.py. -This might be configurable by CLI later (TODO #69). +The thresholds that decide which jobs are solved which way are defined in settings.py and can be passed as env, +see [docker-compose.yml](/docker-compose.yml) for details. 
## Contributing diff --git a/app/constants.py b/app/constants.py deleted file mode 100644 index 6b8f23f..0000000 --- a/app/constants.py +++ /dev/null @@ -1,19 +0,0 @@ -from pydantic import BaseModel - -# used for git tags -version = "v1.0.1" - - -class SolverSettings(BaseModel): - bruteforce_max_combinations: int - n_max: int - - -# TODO should be startup parameter (see #69) -solverSettings = SolverSettings( - # Desktop with Ryzen 2700X: - # (4, 3, 2)=1260 => 0.1s, (4, 3, 3)=4200 => 0.8s, (5, 3, 3)=9240 => 8s - bruteforce_max_combinations=5000, - # that is already unusable x100, but the solver takes it easily - n_max=2000 -) diff --git a/app/main.py b/app/main.py index fc449aa..52a2e00 100644 --- a/app/main.py +++ b/app/main.py @@ -6,7 +6,7 @@ from starlette.requests import Request from starlette.responses import HTMLResponse, PlainTextResponse -from app.constants import version, solverSettings +from app.settings import version, solverSettings # don't mark /app as a sources root or pycharm will delete the "app." 
prefix # that's needed for pytest to work correctly from app.solver.data.Job import Job @@ -17,6 +17,8 @@ @asynccontextmanager async def lifespan(app: FastAPI): print(f"Starting CutSolver {version}...") + print(f"Settings: {solverSettings.json()}") + print(f"Routes: {app.routes}") yield print("Shutting down CutSolver...") diff --git a/app/settings.py b/app/settings.py new file mode 100644 index 0000000..ef6cce3 --- /dev/null +++ b/app/settings.py @@ -0,0 +1,17 @@ +from pydantic import PositiveInt +from pydantic_settings import BaseSettings + +# constant; used for git tags +version = "v1.0.1" + + +class SolverSettings(BaseSettings): + # Desktop with Ryzen 2700X: + # (4, 3, 2)=1260 => 0.1s, (4, 3, 3)=4200 => 0.8s, (5, 3, 3)=9240 => 8s + bruteforce_max_combinations: PositiveInt = 5000 + # that is already unusable x100, but the solver takes it easily + solver_n_max: PositiveInt = 2000 + + +# defaults can be overwritten via env +solverSettings = SolverSettings() diff --git a/app/solver/solver.py b/app/solver/solver.py index 77c71d4..62f65ca 100644 --- a/app/solver/solver.py +++ b/app/solver/solver.py @@ -3,7 +3,7 @@ from itertools import permutations from time import perf_counter -from app.constants import solverSettings +from app.settings import solverSettings from app.solver.data.Job import Job, TargetSize from app.solver.data.Result import ResultLengths, Result, SolverType from app.solver.utils import _get_trimming, _sorted @@ -18,7 +18,7 @@ def solve(job: Job) -> Result: if job.n_combinations() <= solverSettings.bruteforce_max_combinations: lengths = _solve_bruteforce(job) solver_type = SolverType.bruteforce - elif job.n_targets() <= solverSettings.n_max: + elif job.n_targets() <= solverSettings.solver_n_max: lengths = _solve_FFD(job) solver_type = SolverType.FFD else: diff --git a/docker-compose.yml b/docker-compose.yml index 3008ac3..afcb3e2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,6 +8,9 @@ services: restart: unless-stopped ports: - 
"8000:80" + environment: + - BRUTEFORCE_MAX_COMBINATIONS=4000 + - SOLVER_N_MAX=1500 cutsolver_frontend: image: modischfabrications/cutsolver_frontend:latest diff --git a/tag_from_version.py b/tag_from_version.py index 6959d95..3c11a1a 100644 --- a/tag_from_version.py +++ b/tag_from_version.py @@ -8,7 +8,7 @@ from git import Repo -from app import constants +from app import settings def compare_versions(left: str, right: str): @@ -35,7 +35,7 @@ def process(): repo = Repo(Path(".")) assert not repo.bare - version = constants.version + version = settings.version version_tags_only = tuple(filter(lambda tag: tag.name[0] == "v", repo.tags)) newest_tag = version_tags_only[-1]