diff --git a/src/npg_porch/models/task.py b/src/npg_porch/models/task.py index df3f01e..7947473 100644 --- a/src/npg_porch/models/task.py +++ b/src/npg_porch/models/task.py @@ -21,7 +21,7 @@ from enum import Enum import hashlib import ujson -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ValidationError from npg_porch.models.pipeline import Pipeline @@ -45,7 +45,7 @@ class Task(BaseModel): title='Task Input', description='A structured parameter set that uniquely identifies a piece of work, and enables an iteration of a pipeline' # noqa: E501 ) - status: TaskStateEnum | None = None + status: TaskStateEnum = TaskStateEnum.PENDING def generate_task_id(self): return hashlib.sha256(ujson.dumps(self.task_input, sort_keys=True).encode()).hexdigest() @@ -56,18 +56,21 @@ def __eq__(self, other): The pipeline and task_input_ids can partially differ and it still be a valid comparison. Clients do not get to create task_input_ids and may - not fully specify a pipeline. Status is also optional + not fully specify a pipeline. 
Automatically attempts to cast a dict into a Task, and therefore ignores any properties not valid for a Task ''' - if not isinstance(other, Task): - if isinstance(other, dict): + if isinstance(other, dict): + try: other = Task.model_validate(other) - else: + except ValidationError: return False + if not isinstance(other, Task): + return False + truths = [] for k, v in self.model_dump().items(): other_d = other.model_dump() @@ -81,5 +84,5 @@ def __eq__(self, other): truths.append(v == other_d[k]) if all(truths): return True - else: - return False + + return False diff --git a/tests/fixtures/deploy_db.py b/tests/fixtures/deploy_db.py index ffecb99..654c467 100644 --- a/tests/fixtures/deploy_db.py +++ b/tests/fixtures/deploy_db.py @@ -46,7 +46,8 @@ def minimum_data(): definition={ 'to_do': 'stuff', 'why': 'reasons' - } + }, + state=TaskStateEnum.PENDING ), Task( pipeline=pipeline, @@ -56,7 +57,8 @@ def minimum_data(): definition={ 'to_do': 'more stuff', 'why': 'reasons' - } + }, + state=TaskStateEnum.PENDING ) ] diff --git a/tests/task_route_test.py b/tests/task_route_test.py index ba62fd5..90ebaa4 100644 --- a/tests/task_route_test.py +++ b/tests/task_route_test.py @@ -67,9 +67,9 @@ def test_task_creation(async_minimum, fastapi_testclient): def test_task_update(async_minimum, fastapi_testclient): task = fastapi_testclient.get('/tasks', headers=headers4ptest_one).json()[0] - assert task['status'] is None + assert task['status'] == TaskStateEnum.PENDING.value - task['status'] = TaskStateEnum.PENDING + task['status'] = TaskStateEnum.RUNNING response = fastapi_testclient.put( '/tasks', json=task, @@ -206,7 +206,16 @@ def test_get_tasks(async_minimum, async_tasks, fastapi_testclient): ) assert response.status_code == status.HTTP_200_OK, 'Other optional argument works' tasks = response.json() - assert len(tasks) == 10, 'Ten pending tasks selected' + # async_minimum provides 2 tasks, async_tasks provides 10 + assert len(tasks) == 12, 'Twelve pending tasks selected' + + 
response = fastapi_testclient.get( + '/tasks?status=RUNNING', + headers=headers4ptest_one + ) + assert response.status_code == status.HTTP_200_OK, 'Status filter for RUNNING works' + tasks = response.json() + assert len(tasks) == 0, 'No running tasks selected' response = fastapi_testclient.get( '/tasks?pipeline_name="ptest one"&status=PENDING',