diff --git a/app/constants.py b/app/constants.py
index de49de1..8f606c6 100644
--- a/app/constants.py
+++ b/app/constants.py
@@ -1,5 +1,19 @@
-version = "v1.0.0"
+from pydantic import BaseModel
-# solver parameter
-n_max_precise = 9 # 10 takes 30s on a beefy desktop, 9 only 1.2s
-n_max = 500 # around 1 million with n^2
+# used for git tags
+version = "v1.0.1"
+
+
+class SolverSettings(BaseModel):
+ bruteforce_max_combinations: int
+ n_max: int
+
+
+# TODO should be startup parameter
+solverSettings = SolverSettings(
+ # Desktop with Ryzen 2700X:
+ # (4, 3, 2)=1260 => 0.1s, (4, 3, 3)=4200 => 0.8s, (5, 3, 3)=9240 => 8s
+ bruteforce_max_combinations=5000,
+ # already ~100x beyond any practical input size, but the solver handles it easily
+ n_max=2000
+)
diff --git a/app/main.py b/app/main.py
index bc75f42..a4e337f 100644
--- a/app/main.py
+++ b/app/main.py
@@ -6,8 +6,7 @@
from starlette.requests import Request
from starlette.responses import HTMLResponse, PlainTextResponse
-from app.constants import version, n_max_precise, n_max
-
+from app.constants import version, solverSettings
# don't mark /app as a sources root or pycharm will delete the "app." prefix
# that's needed for pytest to work correctly
from app.solver.data.Job import Job
@@ -84,15 +83,9 @@ def get_debug():
@app.get("/constants", response_class=HTMLResponse)
-def get_debug():
- static_answer = (
- "Constants:"
- "
"
- f"- Max Entries for perfect results: {n_max_precise}
"
- f"- Max Entries for any result: {n_max}
"
- )
-
- return static_answer
+@app.get("/settings", response_class=HTMLResponse)
+def get_settings():
+ return solverSettings
# content_type results in browser pretty printing
diff --git a/app/solver/data/Job.py b/app/solver/data/Job.py
index 3dfe5d8..9833345 100644
--- a/app/solver/data/Job.py
+++ b/app/solver/data/Job.py
@@ -1,11 +1,15 @@
-from typing import Iterator, List, Optional, Tuple
+from math import factorial, prod
+from typing import Iterator, Optional
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict, PositiveInt, NonNegativeInt, model_validator
class TargetSize(BaseModel):
- length: int
- quantity: int
+ # frozen might be nice, but that would make reuse in solvers worse
+ model_config = ConfigDict(validate_assignment=True)
+
+ length: PositiveInt
+ quantity: PositiveInt
name: Optional[str] = ""
def __lt__(self, other):
@@ -19,13 +23,13 @@ def __str__(self):
class Job(BaseModel):
- max_length: int
- cut_width: int = 0
- target_sizes: List[TargetSize]
+ model_config = ConfigDict(frozen=True, validate_assignment=True)
- # utility
+ max_length: PositiveInt
+ cut_width: NonNegativeInt = 0
+ target_sizes: tuple[TargetSize, ...]
- def iterate_sizes(self) -> Iterator[Tuple[int, str | None]]:
+ def iterate_sizes(self) -> Iterator[tuple[int, str | None]]:
"""
yields all lengths, sorted descending
"""
@@ -33,53 +37,37 @@ def iterate_sizes(self) -> Iterator[Tuple[int, str | None]]:
# sort descending to favor combining larger sizes first
for target in sorted(self.target_sizes, key=lambda x: x.length, reverse=True):
for _ in range(target.quantity):
- yield (target.length, target.name)
-
- # NOTE: Not used, so not really refactored at the moment
- def sizes_from_list(self, sizes_list: List[TargetSize]):
- # known_sizes = {}
- #
- # # list to dict to make them unique
- # for size in sizes_list:
- # if size.length in known_sizes:
- # known_sizes[size.length] += size.quantity
- # else:
- # known_sizes[size.length] = size.quantity
-
- self.target_sizes = sizes_list
-
- # NOTE: Can eventually be removed as it does nothing anymore
- def sizes_as_list(self) -> List[TargetSize]:
+ yield target.length, target.name
+
+ def n_targets(self) -> int:
+ """
+ Total number of target pieces in the job (sum of all quantities)
+ """
+ return sum([target.quantity for target in self.target_sizes])
+
+ def n_combinations(self) -> int:
"""
- Compatibility function
+ Number of possible combinations of target sizes
"""
- # back to list again for compatibility
- return self.target_sizes
+ return int(factorial(self.n_targets()) / prod([factorial(n.quantity) for n in self.target_sizes]))
- def assert_valid(self):
+ @model_validator(mode='after')
+ def assert_valid(self) -> 'Job':
if self.max_length <= 0:
raise ValueError(f"Job has invalid max_length {self.max_length}")
if self.cut_width < 0:
raise ValueError(f"Job has invalid cut_width {self.cut_width}")
if len(self.target_sizes) <= 0:
raise ValueError("Job is missing target_sizes")
- if any(
- target.length > (self.max_length - self.cut_width)
- for target in self.target_sizes
- ):
+ if any(target.length > self.max_length for target in self.target_sizes):
raise ValueError("Job has target sizes longer than the stock")
-
- def __len__(self) -> int:
- """
- Number of target sizes in job
- """
- return sum([target.quantity for target in self.target_sizes])
+ return self
def __eq__(self, other):
return (
- self.max_length == other.max_length
- and self.cut_width == other.cut_width
- and self.target_sizes == other.target_sizes
+ self.max_length == other.max_length
+ and self.cut_width == other.cut_width
+ and self.target_sizes == other.target_sizes
)
def __hash__(self) -> int:
diff --git a/app/solver/data/Result.py b/app/solver/data/Result.py
index a57bbac..cae1dae 100644
--- a/app/solver/data/Result.py
+++ b/app/solver/data/Result.py
@@ -1,7 +1,7 @@
from enum import unique, Enum
-from typing import List, Tuple, Optional
+from typing import Optional, TypeAlias
-from pydantic import BaseModel
+from pydantic import BaseModel, PositiveInt, model_validator
from app.solver.data.Job import Job
@@ -13,12 +13,15 @@ class SolverType(str, Enum): # str as base enables Pydantic-Schemas
FFD = "FFD"
+ResultLength: TypeAlias = tuple[tuple[PositiveInt, str | None], ...]
+ResultLengths: TypeAlias = tuple[ResultLength, ...]
+
+
class Result(BaseModel):
- # allow IDs to skip redundant transmission for future versions
job: Job
solver_type: SolverType
- time_us: Optional[int] = -1
- lengths: List[List[Tuple[int, str]]]
+ time_us: Optional[int] = None
+ lengths: ResultLengths
# no trimmings as they can be inferred from difference to job
@@ -38,11 +41,11 @@ def exactly(self, other):
and self.lengths == other.lengths
)
+ @model_validator(mode='after')
def assert_valid(self):
self.job.assert_valid()
if self.solver_type not in SolverType:
raise ValueError(f"Result has invalid solver_type {self.solver_type}")
- if self.time_us < 0:
- raise ValueError(f"Result has invalid time_us {self.time_us}")
if len(self.lengths) <= 0:
raise ValueError("Result is missing lengths")
+ return self
diff --git a/app/solver/solver.py b/app/solver/solver.py
index 76020ae..5296161 100644
--- a/app/solver/solver.py
+++ b/app/solver/solver.py
@@ -2,23 +2,23 @@
import copy
from itertools import permutations
from time import perf_counter
-from typing import Collection, Tuple, List
+from typing import Collection
-from app.constants import n_max_precise, n_max
+from app.constants import solverSettings
from app.solver.data.Job import Job, TargetSize
-from app.solver.data.Result import SolverType, Result
+from app.solver.data.Result import SolverType, Result, ResultLengths
def distribute(job: Job) -> Result:
time: float = perf_counter()
- lengths: List[List[Tuple[int, str]]]
+ lengths: ResultLengths
solver_type: SolverType
- if len(job) <= n_max_precise:
+ if job.n_combinations() <= solverSettings.bruteforce_max_combinations:
lengths = _solve_bruteforce(job)
solver_type = SolverType.bruteforce
- elif len(job) <= n_max:
+ elif job.n_targets() <= solverSettings.n_max:
lengths = _solve_FFD(job)
solver_type = SolverType.FFD
else:
@@ -29,40 +29,46 @@ def distribute(job: Job) -> Result:
return Result(job=job, solver_type=solver_type, time_us=time_us, lengths=lengths)
-# CPU-bound
-# O(n!)
-def _solve_bruteforce(job: Job) -> List[List[Tuple[int, str | None]]]:
- # failsafe
- if len(job) > 12:
- raise OverflowError("Input too large")
-
+# slowest, but perfect solver; originally O(n!), now much faster (see Job.n_combinations())
+def _solve_bruteforce(job: Job) -> ResultLengths:
mutable_job = job.model_copy(deep=True)
+ mutable_job.model_config["frozen"] = False
+
# allow "overflowing" cut at the end
mutable_job.max_length += mutable_job.cut_width
- # find every possible ordering (n! elements)
- all_orderings = permutations(job.iterate_sizes())
- # TODO: remove duplicates (due to "quantity")
- all_orderings = permutations(mutable_job.iterate_sizes())
+ # find every possible ordering (`factorial(sum(sizes))` elements) and reduce to unique
+ all_orderings = set(permutations(mutable_job.iterate_sizes()))
- # "infinity"
- minimal_trimmings = len(mutable_job) * mutable_job.max_length
- best_stock: List[List[Tuple[int, str | None]]] = []
+ # start at "all of it"
+ minimal_trimmings = mutable_job.n_targets() * mutable_job.max_length
+ best_results: list[list[list[tuple[int, str | None]]]] = []
- # possible improvement: Distribute combinations to multiprocessing worker threads
+ # could distribute to multiprocessing, but web worker is parallel anyway
for combination in all_orderings:
- stocks, trimmings = _split_combination(
+ lengths, trimmings = _group_into_lengths(
combination, mutable_job.max_length, mutable_job.cut_width
)
if trimmings < minimal_trimmings:
- best_stock = stocks
+ best_stock = lengths
minimal_trimmings = trimmings
-
- return _sorted(best_stock)
-
-
-def _split_combination(
- combination: Tuple[Tuple[int, str | None]], max_length: int, cut_width: int
+ best_results.clear()
+ best_results.append(best_stock)
+ elif trimmings == minimal_trimmings:
+ best_results.append(lengths)
+
+ # set creates random order of perfect results due to missing sorting, so sort for determinism
+ ordered = (sorted(
+ set([tuple(sorted(
+ [tuple(sorted(y, reverse=True)) for y in x], reverse=True)
+ ) for x in best_results]), reverse=True
+ ))
+ # TODO evaluate which result aligns with user expectations best
+ return _sorted(ordered[0])
+
+
+def _group_into_lengths(
+ combination: tuple[tuple[int, str | None], ...], max_length: int, cut_width: int
):
"""
Collects sizes until length is reached, then starts another stock
@@ -71,18 +77,18 @@ def _split_combination(
:param cut_width:
:return:
"""
- stocks: List[List[Tuple[int, str | None]]] = []
+ stocks: list[list[tuple[int, str | None]]] = []
trimmings = 0
current_size = 0
- current_stock: List[Tuple[int, str | None]] = []
+ current_stock: list[tuple[int, str | None]] = []
for size, name in combination:
if (current_size + size + cut_width) > max_length:
# start next stock
stocks.append(current_stock)
trimmings += _get_trimming(max_length, current_stock, cut_width)
current_size = 0
- current_stock: List[Tuple[int, str | None]] = []
+ current_stock = []
current_size += size + cut_width
current_stock.append((size, name))
@@ -93,23 +99,62 @@ def _split_combination(
return stocks, trimmings
-# this might actually be worse than FFD (both in runtime and solution), disabled for now
-# O(n^2) ??
-def _solve_gapfill(job: Job) -> List[List[Tuple[int, str | None]]]:
+# textbook solution, guaranteed to get at most double trimmings of perfect solution; possibly O(n^2)?
+def _solve_FFD(job: Job) -> ResultLengths:
+ # iterate over list of stocks
+ # put into first stock that it fits into
+
+ # 1. Sort by magnitude (largest first)
+ # 2. stack until limit is reached
+ # 3. try smaller as long as possible
+ # 4. create new bar
+
+ mutable_sizes = copy.deepcopy(job.target_sizes)
+ sizes = sorted(mutable_sizes, reverse=True)
+
+ stocks: list[list[tuple[int, str | None]]] = [[]]
+
+ i_target = 0
+
+ while i_target < len(sizes):
+ current_size = sizes[i_target]
+ for stock in stocks:
+ # calculate current stock length
+ stock_length = (
+ sum([size[0] for size in stock]) + (len(stock) - 1) * job.cut_width
+ )
+ # step through existing stocks until current size fits
+ if (job.max_length - stock_length) > current_size.length:
+ # add size
+ stock.append((current_size.length, current_size.name))
+ break
+ else: # nothing fit, opening next bin
+ stocks.append([(current_size.length, current_size.name)])
+
+ # decrease/get next
+ if current_size.quantity <= 1:
+ i_target += 1
+ else:
+ current_size.quantity -= 1
+
+ return _sorted(stocks)
+
+
+# even faster than FFD with seemingly equal results; self-made heuristic, less rigorously proven!
+def _solve_gapfill(job: Job) -> ResultLengths:
# 1. Sort by magnitude (largest first)
# 2. stack until limit is reached
# 3. try smaller as long as possible
# 4. create new bar
- # TODO: rewrite to use native map instead
# we are writing around in target sizes, prevent leaking changes to job
- mutable_sizes = copy.deepcopy(job.sizes_as_list())
+ mutable_sizes = copy.deepcopy(job.target_sizes)
targets = sorted(mutable_sizes, reverse=True)
stocks = []
current_size = 0
- current_stock: List[Tuple[int, str | None]] = []
+ current_stock: list[tuple[int, str | None]] = []
i_target = 0
while len(targets) > 0:
@@ -125,9 +170,11 @@ def _solve_gapfill(job: Job) -> List[List[Tuple[int, str | None]]]:
current_target: TargetSize = targets[i_target]
# target fits inside current stock, transfer to results
- if (current_size + current_target.length + job.cut_width) < job.max_length:
+ if (current_size + current_target.length) <= job.max_length:
current_stock.append((current_target.length, current_target.name))
- current_size += current_target.length + job.cut_width
+ current_size += current_target.length
+ if current_size < job.max_length:
+ current_size += job.cut_width
# remove empty entries
if current_target.quantity <= 1:
@@ -146,68 +193,30 @@ def _solve_gapfill(job: Job) -> List[List[Tuple[int, str | None]]]:
return _sorted(stocks)
-# textbook solution, guaranteed to get at most double trimmings of perfect solution
-def _solve_FFD(job: Job) -> List[List[Tuple[int, str | None]]]:
- # iterate over list of stocks
- # put into first stock that it fits into
-
- # 1. Sort by magnitude (largest first)
- # 2. stack until limit is reached
- # 3. try smaller as long as possible
- # 4. create new bar
-
- # TODO: rewrite to use native map instead?
- mutable_sizes = copy.deepcopy(job.sizes_as_list())
- sizes = sorted(mutable_sizes, reverse=True)
-
- stocks: List[List[Tuple[int, str | None]]] = [[]]
-
- i_target = 0
-
- while i_target < len(sizes):
- current_size = sizes[i_target]
- for stock in stocks:
- # calculate current stock length
- stock_length = (
- sum([size[0] for size in stock]) + (len(stock) - 1) * job.cut_width
- )
- # step through existing stocks until current size fits
- if (job.max_length - stock_length) > current_size.length:
- # add size
- stock.append((current_size.length, current_size.name))
- break
- else: # nothing fit, opening next bin
- stocks.append([(current_size.length, current_size.name)])
-
- # decrease/get next
- if current_size.quantity <= 1:
- i_target += 1
- else:
- current_size.quantity -= 1
-
- return _sorted(stocks)
-
-
def _get_trimming(
- max_length: int, lengths: Collection[Tuple[int, str | None]], cut_width: int
+ max_length: int, lengths: Collection[tuple[int, str | None]], cut_width: int
) -> int:
sum_lengths = sum([length[0] for length in lengths])
sum_cuts = len(lengths) * cut_width
trimmings = max_length - (sum_lengths + sum_cuts)
+ # cut at the end can be omitted
+ if trimmings == -cut_width:
+ trimmings = 0
+
if trimmings < 0:
- raise OverflowError
+ raise OverflowError("Trimmings can't ever be negative!")
return trimmings
-def _get_trimmings(
- max_length: int, lengths: Collection[Collection[Tuple[int, str | None]]], cut_width: int
-) -> int:
+def _get_trimmings(max_length: int, lengths: ResultLengths, cut_width: int) -> int:
return sum(_get_trimming(max_length, x, cut_width) for x in lengths)
-def _sorted(lengths: List[List[Tuple[int, str | None]]]) -> List[List[Tuple[int, str | None]]]:
+
+def _sorted(lengths: Collection[Collection]) -> ResultLengths:
# keep most cuts at the top, getting simpler towards the end
# this could also sort by trimmings but that is more work
- return sorted(lengths, key=len, reverse=True)
+ lengths = tuple([tuple(sorted(l, reverse=True)) for l in lengths])
+ return tuple(sorted(lengths, key=len, reverse=True))
diff --git a/tests/res/in/testjob_cuts.json b/tests/res/in/testjob_cuts.json
deleted file mode 100644
index 778a4a1..0000000
--- a/tests/res/in/testjob_cuts.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "cut_width": 10,
- "max_length": 1010,
- "target_sizes": [
- {
- "length": 500,
- "quantity": 4
- }
- ]
-}
diff --git a/tests/res/in/testjob_equal.json b/tests/res/in/testjob_equal.json
deleted file mode 100644
index 460bc19..0000000
--- a/tests/res/in/testjob_equal.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "cut_width": 10,
- "max_length": 1000,
- "target_sizes": [
- {
- "length": 1000,
- "quantity": 4
- }
- ]
-}
diff --git a/tests/res/in/testjob_l.json b/tests/res/in/testjob_l.json
deleted file mode 100644
index 4921e5d..0000000
--- a/tests/res/in/testjob_l.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "cut_width": 5,
- "max_length": 2000,
- "target_sizes": [
- {
- "length": 750,
- "quantity": 5
- },
- {
- "length": 500,
- "quantity": 5
- },
- {
- "length": 300,
- "quantity": 10
- },
- {
- "length": 100,
- "quantity": 15
- }
- ]
-}
diff --git a/tests/res/in/testjob_m.json b/tests/res/in/testjob_m.json
deleted file mode 100644
index 23245fa..0000000
--- a/tests/res/in/testjob_m.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "cut_width": 5,
- "max_length": 1000,
- "target_sizes": [
- {
- "length": 500,
- "quantity": 4
- },
- {
- "length": 300,
- "quantity": 3
- },
- {
- "length": 100,
- "quantity": 1
- }
- ]
-}
diff --git a/tests/res/in/testjob_zero_cuts.json b/tests/res/in/testjob_zero_cuts.json
deleted file mode 100644
index 8beb9b2..0000000
--- a/tests/res/in/testjob_zero_cuts.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "cut_width": 0,
- "max_length": 1000,
- "target_sizes": [
- {
- "length": 500,
- "quantity": 4
- }
- ]
-}
diff --git a/tests/solver/data/test_Job.py b/tests/solver/data/test_Job.py
index 5e9258f..e859d13 100644
--- a/tests/solver/data/test_Job.py
+++ b/tests/solver/data/test_Job.py
@@ -32,7 +32,13 @@ def test_job_equal(testjob_s):
def test_job_length(testjob_s):
job1 = testjob_s
- assert len(job1) == 6
+ assert job1.n_targets() == 6
+
+
+def test_job_combinations(testjob_s):
+ job1 = testjob_s
+
+ assert job1.n_combinations() == 15
def test_equal_hash(testjob_s):
@@ -49,15 +55,17 @@ def test_valid(testjob_s):
def test_invalid(testjob_s):
invalid_job = testjob_s
- invalid_job.max_length = -1
with pytest.raises(ValueError):
- invalid_job.assert_valid()
+ invalid_job.max_length = -1
+
+ with pytest.raises(ValueError):
+ invalid_job.cut_width = -1
def test_too_long(testjob_s):
- job = testjob_s
- job.target_sizes.append(
- TargetSize(**{"length": job.max_length + 1, "quantity": 4, "name": "too long"})
- )
with pytest.raises(ValueError):
- job.assert_valid()
+ job = Job(
+ max_length=100,
+ cut_width=5,
+ target_sizes=(TargetSize(length=101, quantity=1),)
+ )
diff --git a/tests/solver/data/test_Result.py b/tests/solver/data/test_Result.py
index 50e05b4..c86e4f8 100644
--- a/tests/solver/data/test_Result.py
+++ b/tests/solver/data/test_Result.py
@@ -50,25 +50,42 @@ def test_valid(testjob_s):
def test_invalid(testjob_s):
job = testjob_s
- invalid_result = Result(
- job=job,
- solver_type=SolverType.FFD,
- time_us=-1,
- lengths=[
- [
- (100, "Part1"),
- (100, "Part1"),
- (100, "Part1"),
- ],
- [
- (200, "Part2"),
- (200, "Part2"),
- (200, "Part2"),
- ],
- ],
- )
+
+ with pytest.raises(ValueError):
+ no_job = Result(
+ solver_type=SolverType.FFD,
+ time_us=-1,
+ lengths=[
+ [
+ (100, "Part1"),
+ (100, "Part1"),
+ (100, "Part1"),
+ ],
+ [
+ (200, "Part2"),
+ (200, "Part2"),
+ (200, "Part2"),
+ ],
+ ],
+ )
+
with pytest.raises(ValueError):
- invalid_result.assert_valid()
+ no_solve = Result(
+ job=job,
+ time_us=-1,
+ lengths=[
+ [
+ (100, "Part1"),
+ (100, "Part1"),
+ (100, "Part1"),
+ ],
+ [
+ (200, "Part2"),
+ (200, "Part2"),
+ (200, "Part2"),
+ ],
+ ],
+ )
def test_equal(testjob_s):
diff --git a/tests/solver/data/test_json.py b/tests/solver/data/test_json.py
index 3fd1c59..ba65dcd 100644
--- a/tests/solver/data/test_json.py
+++ b/tests/solver/data/test_json.py
@@ -14,8 +14,8 @@ def test_to_json():
],
)
assert (
- job.model_dump_json()
- == '{"max_length":1200,"cut_width":5,"target_sizes":[{"length":300,"quantity":4,"name":"Part1"},{"length":200,"quantity":3,"name":""}]}'
+ job.model_dump_json()
+ == '{"max_length":1200,"cut_width":5,"target_sizes":[{"length":300,"quantity":4,"name":"Part1"},{"length":200,"quantity":3,"name":""}]}'
)
@@ -26,4 +26,4 @@ def test_from_json():
with open(json_file, "r") as encoded_job:
job = Job.model_validate_json(encoded_job.read())
assert job.__class__ == Job
- assert len(job) > 0
+ assert job.assert_valid
diff --git a/tests/solver/test_large.py b/tests/solver/test_large.py
index 7d8d982..1835b07 100644
--- a/tests/solver/test_large.py
+++ b/tests/solver/test_large.py
@@ -1,35 +1,68 @@
+from app.solver.data.Job import TargetSize
from app.solver.solver import (
_solve_bruteforce,
- _solve_FFD,
+ _solve_FFD, _solve_gapfill, _get_trimmings,
)
from tests.test_fixtures import *
# close to the max for bruteforce!
-@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD])
-def test_m(testjob_m, solver):
- orig_job = testjob_m.model_copy(deep=True)
+@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD, _solve_gapfill])
+def test_m(solver):
+ testjob_m = Job(max_length=1000, cut_width=5, target_sizes=(
+ TargetSize(length=500, quantity=4), TargetSize(length=300, quantity=3),
+ TargetSize(length=100, quantity=2)))
+
solved = solver(testjob_m)
- assert solved == [
- [(500, ''), (300, ''), (100, '')],
- [(500, ''), (300, '')],
- [(500, ''), (300, '')],
- [(500, '')]
- ]
- assert orig_job == testjob_m
+ assert solved == (
+ ((500, ''), (300, ''), (100, '')),
+ ((500, ''), (300, ''), (100, '')),
+ ((500, ''), (300, '')),
+ ((500, ''),))
+
+@pytest.mark.parametrize("solver", [_solve_FFD, _solve_gapfill])
+def test_l(solver):
+ testjob_l = Job(max_length=2000, cut_width=5, target_sizes=(
+ TargetSize(length=750, quantity=5), TargetSize(length=500, quantity=5),
+ TargetSize(length=300, quantity=10), TargetSize(length=100, quantity=15)))
-@pytest.mark.parametrize("solver", [_solve_FFD])
-def test_l(testjob_l, solver):
- orig_job = testjob_l.model_copy(deep=True)
solved = solver(testjob_l)
- assert solved == [
- [(300, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, '')],
- [(300, ''), (300, ''), (300, ''), (300, ''), (300, ''), (300, ''), (100, '')],
- [(750, ''), (500, ''), (500, ''), (100, ''), (100, '')],
- [(500, ''), (500, ''), (500, ''), (300, ''), (100, '')],
- [(750, ''), (750, ''), (300, ''), (100, '')],
- [(750, ''), (750, ''), (300, ''), (100, '')]]
- assert orig_job == testjob_l
+ assert solved == (
+ ((300, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, ''), (100, '')),
+ ((300, ''), (300, ''), (300, ''), (300, ''), (300, ''), (300, ''), (100, '')),
+ ((750, ''), (500, ''), (500, ''), (100, ''), (100, '')),
+ ((500, ''), (500, ''), (500, ''), (300, ''), (100, '')),
+ ((750, ''), (750, ''), (300, ''), (100, '')),
+ ((750, ''), (750, ''), (300, ''), (100, '')))
+
+
+# tests after here are only benchmarking and shouldn't ever be relevant
+
+@pytest.mark.parametrize("solver", [_solve_FFD, _solve_gapfill])
+def test_xl(solver):
+ testjob = Job(max_length=2000, cut_width=10, target_sizes=(
+ TargetSize(length=2000, quantity=5), TargetSize(length=1500, quantity=10),
+ TargetSize(length=750, quantity=25), TargetSize(length=500, quantity=50),
+ TargetSize(length=300, quantity=100), TargetSize(length=50, quantity=250),
+ ))
+
+ solved = solver(testjob)
+
+ assert _get_trimmings(testjob.max_length, solved, testjob.cut_width) == 2520
+
+
+@pytest.mark.parametrize("solver", [_solve_FFD, _solve_gapfill])
+def test_xxl(solver):
+ testjob = Job(max_length=2000, cut_width=10, target_sizes=(
+ TargetSize(length=1750, quantity=5), TargetSize(length=1500, quantity=10),
+ TargetSize(length=750, quantity=25), TargetSize(length=500, quantity=50),
+ TargetSize(length=300, quantity=100), TargetSize(length=200, quantity=150),
+ TargetSize(length=150, quantity=250), TargetSize(length=50, quantity=500),
+ ))
+
+ solved = solver(testjob)
+
+ assert _get_trimmings(testjob.max_length, solved, testjob.cut_width) == 3250
diff --git a/tests/solver/test_solver.py b/tests/solver/test_solver.py
index 27d17e5..c782c3a 100644
--- a/tests/solver/test_solver.py
+++ b/tests/solver/test_solver.py
@@ -19,6 +19,16 @@ def test_trimming():
assert trimming == 70
+def test_trimming_zero():
+ trimming = _get_trimming(
+ max_length=1500,
+ lengths=((500, ""), (500, ""), (480, "")),
+ cut_width=10,
+ )
+
+ assert trimming == 0
+
+
def test_trimming_raise():
# raises Error if more stock was used than available
with pytest.raises(OverflowError):
@@ -49,10 +59,10 @@ def test_solver_is_exactly(testjob_s, solver):
orig_job = testjob_s.model_copy(deep=True)
solved = solver(testjob_s)
- assert solved == [
- [(500, "Part1"), (500, "Part1"), (200, "Part2"), (200, "Part2")],
- [(200, "Part2"), (200, "Part2")],
- ]
+ assert solved == (
+ ((500, "Part1"), (500, "Part1"), (200, "Part2"), (200, "Part2")),
+ ((200, "Part2"), (200, "Part2")),
+ )
assert orig_job == testjob_s
diff --git a/tests/solver/test_special.py b/tests/solver/test_special.py
index e3f05ec..f246150 100644
--- a/tests/solver/test_special.py
+++ b/tests/solver/test_special.py
@@ -1,46 +1,44 @@
+from app.solver.data.Job import TargetSize
from app.solver.solver import (
_solve_bruteforce,
- _solve_FFD,
+ _solve_FFD, _solve_gapfill,
)
from tests.test_fixtures import *
# @pytest.mark.skip(reason="bug #63")
-@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD])
-def test_cuts(testjob_cuts, solver):
- orig_job = testjob_cuts.model_copy(deep=True)
+@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD, _solve_gapfill])
+def test_cuts(solver):
+ testjob_cuts = Job(max_length=1010, cut_width=10, target_sizes=(TargetSize(length=500, quantity=4),))
solved = solver(testjob_cuts)
- assert solved == [
- [(500, ''), (500, '')],
- [(500, ''), (500, '')]
- ]
- assert orig_job == testjob_cuts
+ assert solved == (
+ ((500, ''), (500, '')),
+ ((500, ''), (500, ''))
+ )
# @pytest.mark.skip(reason="bug #59")
-@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD])
-def test_zero_cuts(testjob_zero_cuts, solver):
- orig_job = testjob_zero_cuts.model_copy(deep=True)
+@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD, _solve_gapfill])
+def test_zero_cuts(solver):
+ testjob_zero_cuts = Job(max_length=1000, cut_width=0, target_sizes=(TargetSize(length=500, quantity=4),))
solved = solver(testjob_zero_cuts)
- assert solved == [
- [(500, ''), (500, '')],
- [(500, ''), (500, '')]
- ]
- assert orig_job == testjob_zero_cuts
+ assert solved == (
+ ((500, ''), (500, '')),
+ ((500, ''), (500, ''))
+ )
# @pytest.mark.skip(reason="bug #64")
-@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD])
-def test_equal(testjob_equal, solver):
- orig_job = testjob_equal.model_copy(deep=True)
+@pytest.mark.parametrize("solver", [_solve_bruteforce, _solve_FFD, _solve_gapfill])
+def test_equal(solver):
+ testjob_equal = Job(max_length=1000, cut_width=10, target_sizes=(TargetSize(length=1000, quantity=4),))
solved = solver(testjob_equal)
- assert solved == [
- [(1000, '')],
- [(1000, '')],
- [(1000, '')],
- [(1000, '')]
- ]
- assert orig_job == testjob_equal
+ assert solved == (
+ ((1000, ''),),
+ ((1000, ''),),
+ ((1000, ''),),
+ ((1000, ''),)
+ )
diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py
index d9513d6..417cc82 100644
--- a/tests/test_fixtures.py
+++ b/tests/test_fixtures.py
@@ -23,28 +23,3 @@ def testresult_s():
return Result.model_validate_json(
load_json(Path("./tests/res/out/testresult_s.json"))
)
-
-
-@pytest.fixture
-def testjob_m():
- return Job.model_validate_json(load_json(Path("./tests/res/in/testjob_m.json")))
-
-
-@pytest.fixture
-def testjob_l():
- return Job.model_validate_json(load_json(Path("./tests/res/in/testjob_l.json")))
-
-
-@pytest.fixture
-def testjob_cuts():
- return Job.model_validate_json(load_json(Path("./tests/res/in/testjob_cuts.json")))
-
-
-@pytest.fixture
-def testjob_zero_cuts():
- return Job.model_validate_json(load_json(Path("./tests/res/in/testjob_zero_cuts.json")))
-
-
-@pytest.fixture
-def testjob_equal():
- return Job.model_validate_json(load_json(Path("./tests/res/in/testjob_equal.json")))
diff --git a/tests/test_main.py b/tests/test_main.py
index cb3543d..b81940b 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -12,6 +12,7 @@
def test_get_root():
response = client.get("/")
assert response.status_code == 200
+ assert any(response.text)
def test_get_version():
@@ -23,6 +24,7 @@ def test_get_version():
def test_get_debug():
response = client.get("/debug")
assert response.status_code == 200
+ assert any(response.text)
def test_full(testjob_s, testresult_s):