Change behavior of expected scores when using --tests flag (#90)
* When using the --tests flag, validate expected_scores only for groups whose tests were all run (see the sketch below)

* Add tests

* Fix unit test

* Add some comments

* Change comments

* Bump version
MasloMaslane authored Aug 27, 2023
1 parent f150802 commit 8986c67
Showing 5 changed files with 111 additions and 5 deletions.
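
In short: a group now takes part in expected-scores validation only when every one of its tests was selected with --tests. A minimal, self-contained sketch of that rule, with hypothetical test names and group-extraction logic (the whole_groups helper below is illustrative, not sinol-make's API):

from collections import Counter

def whole_groups(all_tests, selected_tests, get_group):
    """Return only the groups whose every test appears in selected_tests."""
    total = Counter(get_group(t) for t in all_tests)
    selected = Counter(get_group(t) for t in selected_tests)
    return [g for g, size in total.items() if selected[g] == size]

all_tests = ["abc1a.in", "abc1b.in", "abc2a.in"]   # hypothetical package
group_of = lambda name: name[3]                    # "abc1a.in" -> "1"
print(whole_groups(all_tests, ["abc1a.in"], group_of))              # [] (group 1 incomplete)
print(whole_groups(all_tests, ["abc1a.in", "abc1b.in"], group_of))  # ['1']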
2 changes: 1 addition & 1 deletion src/sinol_make/__init__.py
@@ -8,7 +8,7 @@

from sinol_make import util, oiejq

__version__ = "1.5.0"
__version__ = "1.5.1"

def configure_parsers():
parser = argparse.ArgumentParser(
49 changes: 46 additions & 3 deletions src/sinol_make/commands/run/__init__.py
@@ -691,6 +691,29 @@ def print_expected_scores(self, expected_scores):
yaml_dict = { "sinol_expected_scores": self.convert_status_to_string(expected_scores) }
print(yaml.dump(yaml_dict, default_flow_style=None))

def get_whole_groups(self):
"""
Returns a list of groups for which all tests were run.
"""
group_sizes = {}
for test in package_util.get_tests():
group = package_util.get_group(test)
if group not in group_sizes:
group_sizes[group] = 0
group_sizes[group] += 1

run_group_sizes = {}
for test in self.tests:
group = package_util.get_group(test)
if group not in run_group_sizes:
run_group_sizes[group] = 0
run_group_sizes[group] += 1

whole_groups = []
for group in group_sizes.keys():
if group in run_group_sizes and group_sizes[group] == run_group_sizes[group]:
whole_groups.append(group)
return whole_groups

def validate_expected_scores(self, results):
new_expected_scores = {} # Expected scores based on results
@@ -745,9 +768,25 @@ def convert_to_expected(results):
for group in config_expected_scores[solution]["expected"]:
used_groups.add(group)
else:
for solution in results.keys():
for group in results[solution].keys():
used_groups.add(group)
used_groups = self.get_whole_groups()

# Remove from `new_expected_scores` the groups that were not run.
# Then remove solutions for which no groups were run at all.
solutions_to_delete = []
for solution in new_expected_scores.keys():
groups_to_remove = []
for group in new_expected_scores[solution]["expected"]:
if group not in used_groups:
groups_to_remove.append(group)
for group in groups_to_remove:
del new_expected_scores[solution]["expected"][group]

# If there are no groups left, remove the solution.
if len(new_expected_scores[solution]["expected"]) == 0:
solutions_to_delete.append(solution)
for solution in solutions_to_delete:
del new_expected_scores[solution]

used_groups = list(used_groups)

expected_scores = {} # Expected scores from config with only solutions and groups that were run
@@ -763,7 +802,11 @@ def convert_to_expected(results):
expected_scores[solution]["expected"][group] = config_expected_scores[solution]["expected"][group]

expected_scores[solution]["points"] = self.calculate_points(expected_scores[solution]["expected"])
if len(expected_scores[solution]["expected"]) == 0:
del expected_scores[solution]

if self.args.tests is not None:
print("Showing expected scores only for groups with all tests run.")
print(util.bold("Expected scores from config:"))
self.print_expected_scores(expected_scores)
print(util.bold("\nExpected scores based on results:"))
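
For reference, the pruning this hunk performs on new_expected_scores (drop groups that were not fully run, then drop solutions left with no groups at all) can be written more compactly. A rough sketch with the same effect, assuming the dictionary shapes shown above; it is not the project's actual code:

def prune_expected(new_expected_scores, used_groups):
    pruned = {}
    for solution, entry in new_expected_scores.items():
        kept = {g: r for g, r in entry["expected"].items() if g in used_groups}
        if kept:  # solutions with no whole group run are dropped entirely
            pruned[solution] = {**entry, "expected": kept}
    return pruned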
2 changes: 1 addition & 1 deletion src/sinol_make/helpers/package_util.py
@@ -28,7 +28,7 @@ def get_test_key(test):
return get_group(test), test


def get_tests(arg_tests: Union[List[str], None]) -> List[str]:
def get_tests(arg_tests: Union[List[str], None] = None) -> List[str]:
"""
Returns list of tests to run.
:param arg_tests: Tests specified in command line arguments. If None, all tests are returned.
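
The added default matters because the new get_whole_groups helper calls this function with no argument at all. After the change the two calls below are equivalent and both return every test in the package:

package_util.get_tests()      # now valid; same as passing None
package_util.get_tests(None)  # all tests in the package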
62 changes: 62 additions & 0 deletions tests/commands/run/test_integration.py
@@ -1,3 +1,5 @@
import copy
import sys
import pytest

from ...fixtures import *
@@ -139,6 +141,66 @@ def test_flag_tests(create_package, time_tool):
assert command.tests == [test]


@pytest.mark.parametrize("create_package", [get_checker_package_path()], indirect=True)
def test_groups_in_flag_test(capsys, create_package, time_tool):
"""
Test flag --tests with whole and partial groups.
"""
package_path = create_package
create_ins_outs(package_path)

parser = configure_parsers()

# Test with only one test from group 1.
args = parser.parse_args(["run", "--tests", "in/chk1a.in", "--time-tool", time_tool])
command = Command()
command.run(args)
out = capsys.readouterr().out
assert "Showing expected scores only for groups with all tests run." in out
assert "sinol_expected_scores: {}" in out
assert "Expected scores are correct!" in out

# Test with all tests from group 1.
args = parser.parse_args(["run", "--tests", "in/chk1a.in", "in/chk1b.in", "in/chk1c.in", "--time-tool", time_tool])
command = Command()
command.run(args)
out = capsys.readouterr().out
assert 'sinol_expected_scores:\n' \
' chk.cpp:\n' \
' expected: {1: OK}\n' \
' points: 50\n' \
' chk1.cpp:\n' \
' expected: {1: WA}\n' \
' points: 0\n' \
' chk2.cpp:\n' \
' expected:\n' \
' 1: {points: 25, status: OK}\n' \
' points: 25\n' \
' chk3.cpp:\n' \
' expected: {1: OK}\n' \
' points: 50' in out

# Test with incorrect expected scores for first group.
with open(os.path.join(package_path, "config.yml"), "r") as config_file:
correct_config = yaml.load(config_file, Loader=yaml.SafeLoader)
config = copy.deepcopy(correct_config)
config["sinol_expected_scores"]["chk.cpp"]["expected"][1] = "WA"
config["sinol_expected_scores"]["chk.cpp"]["points"] = 50
with open(os.path.join(package_path, "config.yml"), "w") as config_file:
config_file.write(yaml.dump(config))

args = parser.parse_args(["run", "--tests", "in/chk1a.in", "in/chk1b.in", "in/chk1c.in", "--time-tool", time_tool,
"--apply-suggestions"])
command = Command()
command.run(args)
out = capsys.readouterr().out
sys.stdout.write(out)
assert "Solution chk.cpp passed group 1 with status OK while it should pass with status WA." in out
with open(os.path.join(package_path, "config.yml"), "r") as config_file:
config = yaml.load(config_file, Loader=yaml.SafeLoader)
assert config == correct_config


@pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path(),
get_checker_package_path()], indirect=True)
def test_flag_solutions(capsys, create_package, time_tool):
1 change: 1 addition & 0 deletions tests/commands/run/test_unit.py
@@ -132,6 +132,7 @@ def test_validate_expected_scores_success():
command = get_command()
os.chdir(get_simple_package_path())
command.scores = command.config["scores"]
command.tests = package_util.get_tests(None)

# Test with correct expected scores.
command.args = argparse.Namespace(solutions=["prog/abc.cpp"], tests=None)
