diff --git a/openassessment/__init__.py b/openassessment/__init__.py
index 9026118672..6ae6ec99d7 100644
--- a/openassessment/__init__.py
+++ b/openassessment/__init__.py
@@ -2,4 +2,4 @@
 Initialization Information for Open Assessment Module
 """
 
-__version__ = '6.0.11'
+__version__ = '6.0.12'
diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
index 77969f7d54..e7ed9aece0 100644
--- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -9,7 +9,6 @@
     SerializerMethodField,
     URLField,
     Serializer,
-    BooleanField,
 )
 from openassessment.data import OraSubmissionAnswerFactory
 from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField
@@ -224,10 +223,11 @@ class AssessmentSubmitRequestSerializer(MfeAssessmentDataSerializer):
             ...
         ],
         overallFeedback: (String / Empty)
+        step: (String): The step for which we are submitting an assessment
     }
     """
 
-    continueGrading = BooleanField(required=False, default=False)
+    step = CharField()
 
     def to_legacy_format(self, xblock):
         """
diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py
index 2356e8bef2..37b45bcf65 100644
--- a/openassessment/xblock/ui_mixins/mfe/mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -290,18 +290,20 @@ def _assessment_submit_handler(self, data):
         if not serializer.is_valid():
             raise OraApiException(400, error_codes.INCORRECT_PARAMETERS, serializer.errors)
         assessment_data = serializer.to_legacy_format(self)
-        peer_data = self.peer_assessment_data(serializer.data['continueGrading'])
+        requested_step = serializer.data['step']
         try:
-            if peer_data.continue_grading or self.workflow_data.is_peer:
+            if self.workflow_data.is_cancelled:
+                raise InvalidStateToAssess()
+            if requested_step == 'peer':
                 peer_assess(
                     assessment_data['options_selected'],
                     assessment_data['feedback'],
                     assessment_data['criterion_feedback'],
                     self.config_data,
                     self.workflow_data,
-                    peer_data,
+                    self.peer_assessment_data(),
                 )
-            elif self.workflow_data.is_self:
+            elif requested_step == 'self':
                 self_assess(
                     assessment_data['options_selected'],
                     assessment_data['criterion_feedback'],
@@ -310,7 +312,7 @@ def _assessment_submit_handler(self, data):
                     self.workflow_data,
                     self.self_data
                 )
-            elif self.workflow_data.is_training:
+            elif requested_step == 'studentTraining':
                 corrections = training_assess(
                     assessment_data['options_selected'],
                     self.config_data,
@@ -324,6 +326,7 @@ def _assessment_submit_handler(self, data):
             # This catches the error we explicitly raise, as well as any that may be raised from within
             # the assessment logic itself
             context = {
+                'requested_step': requested_step,
                 'student_item': self.config_data.student_item_dict,
                 'workflow': self.workflow_data.workflow,
             }
diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
index db697821d0..81f1f14ee7 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
@@ -476,9 +476,9 @@ def test_assessment_data(self):
                 }
             ],
             "overallFeedback": "Foo",
-            "continueGrading": True,
+            "step": "Wham"
         }).data
 
         self.assertEqual(assessment_submit_request_data["overallFeedback"], "Foo")
         self.assertEqual(len(assessment_submit_request_data["criteria"]), 1)
-        self.assertTrue(assessment_submit_request_data["continueGrading"])
+        self.assertEqual(assessment_submit_request_data["step"], "Wham")
diff --git a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
index 837b649114..c8034cf76d 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
@@ -37,6 +37,7 @@
 from openassessment.xblock.test.test_staff_area import NullUserService, UserStateService
 from openassessment.xblock.test.test_submission import COURSE_ID, setup_mock_team
 from openassessment.xblock.test.test_team import MOCK_TEAM_ID, MockTeamsService
+from openassessment.xblock.ui_mixins.mfe.mixin import MFE_STEP_TO_WORKFLOW_MAPPINGS
 from openassessment.xblock.ui_mixins.mfe.constants import error_codes, handler_suffixes
 from openassessment.xblock.ui_mixins.mfe.submission_serializers import DraftResponseSerializer, SubmissionSerializer
 
@@ -149,9 +150,11 @@ def request_file_callback(self, xblock, payload):
             response_format='response'
         )
 
-    def request_assessment_submit(self, xblock, payload=None):
+    def request_assessment_submit(self, xblock, step=None, payload=None):
         if payload is None:
             payload = self.DEFAULT_ASSESSMENT_SUBMIT_VALUE
+        if step is not None:
+            payload['step'] = step
         return super().request(
             xblock,
             'assessment',
@@ -926,6 +929,7 @@ def mock_assess_functions(self, self_kwargs=None, training_kwargs=None, peer_kwa
         {
             'criterionFeedback': {},
             'overallFeedback': '',
+            'step': 'peer'
         },
         {
             'optionsSelected': ['this is a list'],
@@ -939,57 +943,65 @@ def test_incorrect_params(self, xblock, payload):
         assert resp.status_code == 400
         assert resp.json['error']['errorCode'] == error_codes.INCORRECT_PARAMETERS
 
-    @ddt.data(None, 'cancelled', 'done', 'ai')
+    @ddt.data("self", "peer", "studentTraining")
     @scenario("data/basic_scenario.xml")
-    def test_not_allowed_step_error(self, xblock, status):
-        with self.mock_workflow_status(status):
-            resp = self.request_assessment_submit(xblock)
+    def test_not_allowed_to_assess_when_cancelled(self, xblock, step):
+        with self.mock_workflow_status("cancelled"):
+            resp = self.request_assessment_submit(xblock, step=step)
         assert resp.status_code == 400
         assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS
 
     @ddt.unpack
     @ddt.data(
         ('self', True, False, False),
-        ('training', False, True, False),
+        ('studentTraining', False, True, False),
         ('peer', False, False, True)
     )
     @scenario("data/basic_scenario.xml")
-    def test_assess(self, xblock, step, expect_self, expect_training, expect_peer):
-        with self.mock_workflow_status(step):
+    def test_assess(self, xblock, mfe_step, expect_self, expect_training, expect_peer):
+        workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+        with self.mock_workflow_status(workflow_step):
             with self.mock_assess_functions() as assess_mocks:
-                resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step=mfe_step)
 
         assert resp.status_code == 200
         assert assess_mocks.self.called == expect_self
         assert assess_mocks.training.called == expect_training
         assert assess_mocks.peer.called == expect_peer
 
-    @ddt.data(None, 'cancelled', 'waiting', 'self', 'training', 'done')
+    @ddt.data(None, 'waiting', 'self', 'training', 'done')
     @scenario("data/basic_scenario.xml")
-    def test_continue_grading(self, xblock, step):
+    def test_peer_assess_when_not_in_peer(self, xblock, step):
         with self.mock_assess_functions() as assess_mocks:
             with self.mock_workflow_status(step):
-                with self.mock_continue_grading(True):
-                    resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step="peer")
 
         assert resp.status_code == 200
         assess_mocks.self.assert_not_called()
         assess_mocks.training.assert_not_called()
         assess_mocks.peer.assert_called()
 
-    @ddt.data('self', 'training', 'peer')
+    @ddt.data('self', 'studentTraining', 'peer')
     @scenario("data/basic_scenario.xml")
-    def test_assess_error(self, xblock, step):
+    def test_assess_error(self, xblock, mfe_step):
         error = AssessmentError("there was a problem")
-        with self.mock_workflow_status(step):
-            with self.mock_assess_functions(**{step + '_kwargs': {'side_effect': error}}):
-                resp = self.request_assessment_submit(xblock)
+        workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+        with self.mock_workflow_status(workflow_step):
+            with self.mock_assess_functions(**{workflow_step + '_kwargs': {'side_effect': error}}):
+                resp = self.request_assessment_submit(xblock, step=mfe_step)
         assert_error_response(resp, 500, error_codes.INTERNAL_EXCEPTION, str(error))
 
+    @scenario("data/basic_scenario.xml")
+    def test_cant_submit_when_cancelled(self, xblock):
+        with self.mock_workflow_status('cancelled'):
+            resp = self.request_assessment_submit(xblock, step="peer")
+        assert resp.status_code == 400
+        assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS
+
     @scenario("data/basic_scenario.xml")
     def test_training_assess_corrections(self, xblock):
         corrections = {'ferocity': 'sublime', 'element': 'hydrogen'}
         with self.mock_workflow_status('training'):
             with self.mock_assess_functions(training_kwargs={'return_value': corrections}):
-                resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step='studentTraining')
         assert_error_response(resp, 400, error_codes.TRAINING_ANSWER_INCORRECT, corrections)
diff --git a/package.json b/package.json
index 7f89968c71..260dfe248d 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "edx-ora2",
-  "version": "6.0.11",
+  "version": "6.0.12",
   "repository": "https://github.com/openedx/edx-ora2.git",
   "dependencies": {
     "@edx/frontend-build": "8.0.6",