diff --git a/.gitignore b/.gitignore
index 6d061558..24aefe00 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,6 +38,9 @@ htmlcov
nosetests.xml
unittests.xml

+# VSCode Debugging Setup
+pyvenv.cfg
+
# PII annotation reports
pii_report
diff --git a/edx_exams/apps/core/models.py b/edx_exams/apps/core/models.py
index e9cf0940..8d21e308 100644
--- a/edx_exams/apps/core/models.py
+++ b/edx_exams/apps/core/models.py
@@ -161,6 +161,8 @@ class ExamAttempt(TimeStampedModel):
        ExamAttemptStatus.verified,
        ExamAttemptStatus.rejected,
        ExamAttemptStatus.expired,
+        ExamAttemptStatus.second_review_required,
+        ExamAttemptStatus.error,
    ]

    user = models.ForeignKey(User, db_index=True, on_delete=models.CASCADE)
diff --git a/edx_exams/apps/lti/tests/test_views.py b/edx_exams/apps/lti/tests/test_views.py
index 25617372..1ef3379b 100644
--- a/edx_exams/apps/lti/tests/test_views.py
+++ b/edx_exams/apps/lti/tests/test_views.py
@@ -21,6 +21,7 @@
    ExamFactory,
    ProctoringProviderFactory
)
+from edx_exams.apps.core.models import AssessmentControlResult
from edx_exams.apps.core.statuses import ExamAttemptStatus
from edx_exams.apps.lti.utils import get_lti_root
@@ -38,7 +39,9 @@ def setUp(self):
        self.course_exam_config = CourseExamConfigurationFactory()
        self.exam = ExamFactory()
-        self.attempt = ExamAttemptFactory()
+
+        # Create an Exam Attempt that has already been submitted.
+        self.attempt = ExamAttemptFactory(status=ExamAttemptStatus.submitted)

        # Variables required for testing and verification
        ISS = 'http://test-platform.example/'
@@ -77,6 +80,7 @@ def setUp(self):
        self.test_provider.save()

        self.url = self.get_acs_url(self.attempt.id)
+        self.token = self.make_access_token('https://purl.imsglobal.org/spec/lti-ap/scope/control.all')

    def get_acs_url(self, lti_config_id):
        """
@@ -103,11 +107,11 @@ def make_access_token(self, scope):
            expiration=3600,
        )

-    def create_request_body(self, attempt_number):
+    def create_request_body(self, attempt_number, action, reason_code=None, incident_severity=None):
        """
        Return a template for the data sent in the request to the ACS endpoint.
""" - return { + request_body = { 'user': { 'iss': self.lti_consumer.iss, 'sub': str(self.user.anonymous_user_id) @@ -116,13 +120,27 @@ def create_request_body(self, attempt_number): 'id': self.exam.resource_id }, 'attempt_number': attempt_number, - 'action': 'flag', + 'action': action, 'incident_time': '2018-02-01T10:45:33Z', - 'incident_severity': '0.1', + 'incident_incident_severity': '0.1', 'reason_code': '12056', 'reason_msg': 'Excessive background noise outside candidate control' } + if reason_code and incident_severity: + request_body.update({ + 'reason_code': reason_code, + 'incident_severity': incident_severity, + }) + return request_body + + def make_post_request(self, request_body, token): + # Even though the client.post function below uses json.dumps to serialize the request as json, + # The json serialization needs to happen before the request for an unknown reason + request_body = json.dumps(request_body) + return self.client.post(self.url, data=request_body, content_type='application/json', + HTTP_AUTHORIZATION='Bearer {}'.format(token)) + # Test that an ACS result is created with the expected type @ ddt.data( (ExamAttemptStatus.ready_to_start, 200), (ExamAttemptStatus.started, 200), @@ -152,16 +170,9 @@ def test_acs_attempt_status(self, mock_get_attempt.return_value = self.attempt mock_permissions.return_value = True + request_body = self.create_request_body(self.attempt.attempt_number, 'flag') - token = self.make_access_token('https://purl.imsglobal.org/spec/lti-ap/scope/control.all') - - request_body = self.create_request_body(self.attempt.attempt_number) - - # Even though the client.post function below uses json.dumps to serialize the request as json, - # The json serialization needs to happen before the request for an unknown reason - request_body = json.dumps(request_body) - response = self.client.post(self.url, data=request_body, content_type='application/json', - HTTP_AUTHORIZATION='Bearer {}'.format(token)) + response = self.make_post_request(request_body, self.token) self.assertEqual(response.status_code, expected_response_status) @@ -179,20 +190,161 @@ def test_acs_no_attempt_found(self, mock_get_attempt.return_value = None mock_permissions.return_value = True + # Request w/ attempt number for an attempt that does not exist + request_body = self.create_request_body(false_attempt_number, 'flag') - token = self.make_access_token('https://purl.imsglobal.org/spec/lti-ap/scope/control.all') + response = self.make_post_request(request_body, self.token) - # Request w/ attempt number for an attempt that does not exist - request_body = self.create_request_body(false_attempt_number) + self.assertEqual(response.status_code, 400) - # Even though the client.post function below uses json.dumps to serialize the request as json, - # The json serialization needs to happen before the request for an unkown reason - request_body = json.dumps(request_body) - response = self.client.post(self.url, data=request_body, content_type='application/json', - HTTP_AUTHORIZATION='Bearer {}'.format(token)) + @ ddt.data( + ('user', ''), + ('user', 'sub'), + ('resource_link', ''), + ('resource_link', 'id'), + ('attempt_number', ''), + ('action', ''), + ) + @ ddt.unpack + @ patch.object(Lti1p3ApiAuthentication, 'authenticate', return_value=(AnonymousUser(), None)) + @ patch('edx_exams.apps.lti.views.LtiProctoringAcsPermissions.has_permission') + @ patch('edx_exams.apps.lti.views.get_attempt_for_user_with_attempt_number_and_resource_id') + def test_acs_base_parameter_missing_errors(self, + 
+                                               acs_parameter,
+                                               acs_sub_parameter,
+                                               mock_get_attempt,
+                                               mock_permissions,
+                                               mock_authentication):  # pylint: disable=unused-argument
+        """
+        Test that the endpoint errors correctly if base ACS request parameters are not present
+        """
+        # Make requests missing these items (create request but set the item passed in to undefined/None)
+        # Make the request
+        # Assert the correct error is thrown in the response with status 400
+        mock_get_attempt.return_value = self.attempt
+        mock_permissions.return_value = True
+        request_body = self.create_request_body(
+            self.attempt.attempt_number,
+            'terminate',
+        )
+
+        # Delete the selected field from the request body before sending to cause an error
+        if acs_sub_parameter == '':
+            del request_body[acs_parameter]
+            key_to_fail = acs_parameter
+        else:
+            del request_body[acs_parameter][acs_sub_parameter]
+            key_to_fail = acs_sub_parameter
+
+        response = self.make_post_request(request_body, self.token)
+
+        self.attempt.refresh_from_db()
+        self.assertEqual(response.data, f'ERROR: required parameter \'{key_to_fail}\' was not found.')
+
+    @ddt.data(
+        ['reason_code'],
+        ['incident_time'],
+        ['incident_severity'],
+    )
+    @ddt.unpack
+    @patch.object(Lti1p3ApiAuthentication, 'authenticate', return_value=(AnonymousUser(), None))
+    @patch('edx_exams.apps.lti.views.LtiProctoringAcsPermissions.has_permission')
+    @patch('edx_exams.apps.lti.views.get_attempt_for_user_with_attempt_number_and_resource_id')
+    def test_acs_terminate_parameter_errors(self,
+                                            acs_parameter,
+                                            mock_get_attempt,
+                                            mock_permissions,
+                                            mock_authentication):  # pylint: disable=unused-argument
+        """
+        Test the endpoint errors correctly if request parameters required for the terminate action are not present
+        """
+        # Make requests missing these items (create request but set the item passed in to undefined/None)
+        # Make the request
+        # Assert the correct error is thrown in the response with status 400
+        mock_get_attempt.return_value = self.attempt
+        mock_permissions.return_value = True
+        request_body = self.create_request_body(
+            self.attempt.attempt_number,
+            action='terminate',
+            reason_code='1',
+            incident_severity='0.1',
+        )
+
+        del request_body[acs_parameter]
+
+        response = self.make_post_request(request_body, self.token)
+        self.attempt.refresh_from_db()
+        self.assertEqual(response.data, f'ERROR: required parameter \'{acs_parameter}\' was not found.')
+
+    @patch.object(Lti1p3ApiAuthentication, 'authenticate', return_value=(AnonymousUser(), None))
+    @patch('edx_exams.apps.lti.views.LtiProctoringAcsPermissions.has_permission')
+    @patch('edx_exams.apps.lti.views.get_attempt_for_user_with_attempt_number_and_resource_id')
+    def test_acs_invalid_action(self,
+                                mock_get_attempt,
+                                mock_permissions,
+                                mock_authentication):  # pylint: disable=unused-argument
+        """
+        Test that the endpoint fails if it receives an invalid/unsupported action type
+        """
+        mock_get_attempt.return_value = self.attempt
+        mock_permissions.return_value = True
+        request_body = self.create_request_body(
+            self.attempt.attempt_number,
+            'invalid_action',
+            '1',
+            '0.1'
+        )
+
+        response = self.make_post_request(request_body, self.token)
+        self.attempt.refresh_from_db()

        self.assertEqual(response.status_code, 400)

+    @ddt.data(
+        # Testing reason codes with severity > 0.25
+        ('0', '1.0', 'error'),
+        ('1', '1.0', 'second_review_required'),
+        ('2', '1.0', 'error'),
+        ('25', '1.0', 'error'),
+        # Testing the incident severity
+        ('1', '0.3', 'second_review_required'),
+        ('1', '0.26', 'second_review_required'),
+        ('1', '0.25', 'verified'),
+        ('1', '0.1', 'verified'),
+    )
+    @ddt.unpack
+    @patch.object(Lti1p3ApiAuthentication, 'authenticate', return_value=(AnonymousUser(), None))
+    @patch('edx_exams.apps.lti.views.LtiProctoringAcsPermissions.has_permission')
+    @patch('edx_exams.apps.lti.views.get_attempt_for_user_with_attempt_number_and_resource_id')
+    def test_acs_terminate(self,
+                           reason_code,
+                           incident_severity,
+                           expected_attempt_status,
+                           mock_get_attempt,
+                           mock_permissions,
+                           mock_authentication):  # pylint: disable=unused-argument
+        """
+        Test that the terminate action changes the exam attempt status as expected
+        based on the 'reason_code' and 'incident_severity'.
+        """
+        mock_get_attempt.return_value = self.attempt
+        mock_permissions.return_value = True
+        request_body = self.create_request_body(
+            self.attempt.attempt_number,
+            'terminate',
+            reason_code,
+            incident_severity
+        )
+
+        self.make_post_request(request_body, self.token)
+        self.attempt.refresh_from_db()
+
+        self.assertEqual(self.attempt.status, expected_attempt_status)
+
+        # Ensure an entry was added to the AssessmentControlResult model
+        data = AssessmentControlResult.objects.get(attempt=self.attempt.id)
+        self.assertEqual(self.attempt.id, data.attempt.id)
+
    def test_auth_failures(self):
        """
        Test that an exception occurs if basic access token authentication fails
        """
@@ -258,7 +410,7 @@ def test_start_proctoring_launch_data(self, mock_get_lti_launch_url):
            attempt_number=self.attempt.attempt_number,
            start_assessment_url=expected_proctoring_start_assessment_url,
            assessment_control_url='http://test.exams:18740/lti/1/acs',
-            assessment_control_actions=['flagRequest'],
+            assessment_control_actions=['terminate'],
        )

        expected_launch_data = Lti1p3LaunchData(
@@ -448,6 +600,7 @@ class LtiInstructorLaunchTest(ExamsAPITestCase):
    """
    Test launch_instructor_view
    """
+
    def setUp(self):
        super().setUp()
        self.lti_configuration = LtiConfiguration.objects.create()
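
Aside (illustrative only, not part of the patch): the new tests above build their request bodies with create_request_body and post them through make_post_request. For reference, a terminate request body produced that way has roughly the following shape; all literal values are placeholders taken from or modeled on the test fixtures, and only the fields actually read by the acs view are required.

    # Sketch of an ACS 'terminate' request body; values are placeholders.
    acs_terminate_body = {
        'user': {
            'iss': 'http://test-platform.example/',    # platform issuer used in the test setUp
            'sub': '<anonymous_user_id>',              # anonymized user id; required by the acs view
        },
        'resource_link': {
            'id': '<exam_resource_id>',                # required by the acs view
        },
        'attempt_number': 1,                           # required by the acs view
        'action': 'terminate',                         # required by the acs view
        # The three fields below are additionally required for the 'terminate' action:
        'reason_code': '1',
        'incident_time': '2018-02-01T10:45:33Z',
        'incident_severity': '0.3',
        # Present in the test helper but not read by the view:
        'reason_msg': 'Excessive background noise outside candidate control',
    }
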
diff --git a/edx_exams/apps/lti/views.py b/edx_exams/apps/lti/views.py
index 66b1d8e4..3c3122fb 100644
--- a/edx_exams/apps/lti/views.py
+++ b/edx_exams/apps/lti/views.py
@@ -4,6 +4,7 @@
import logging
+from decimal import Decimal
from urllib.parse import urljoin

from django.contrib.auth import login
@@ -29,7 +30,7 @@
    update_attempt_status
)
from edx_exams.apps.core.exceptions import ExamIllegalStatusTransition
-from edx_exams.apps.core.models import User
+from edx_exams.apps.core.models import AssessmentControlResult, User
from edx_exams.apps.core.statuses import ExamAttemptStatus
from edx_exams.apps.lti.utils import get_lti_root
@@ -46,30 +47,30 @@ def acs(request, lti_config_id):
    """
    Endpoint for ACS actions

-    NOTE: for now just have the actions LOG what the actions is doing.
-    We can implement proper functionality and tests later once we
-    hear back from our third party proctoring service vendors (i.e. Proctorio).
-
-    Currently, we only support flagging of exam attempts.
-    Other ACS actions (pause, resume, terminate, update) could be implemented
+    Currently, we only support the termination of exam attempts.
+    Other ACS actions (pause, resume, flag, update) could be implemented
    in the future if desired.
    """
-    data = request.data
+    try:
+        data = request.data

-    # This identifies the proctoring tool the request is coming from.
-    anonymous_user_id = data['user']['sub']
+        # This identifies the proctoring tool the request is coming from.
+        anonymous_user_id = data['user']['sub']

-    # The link to exam the user is attempting
-    resource_id = data['resource_link']['id']
+        # The link to exam the user is attempting
+        resource_id = data['resource_link']['id']

-    # Exam attempt number (Note: ACS does not differentiate between launches)
-    # i.e, if a launch fails and multiple subsequent launches occur for
-    # the same resource_link + attempt_number combo, the ACS only perceives
-    # this as one singular attempt.
-    attempt_number = data['attempt_number']
+        # Exam attempt number (Note: ACS does not differentiate between launches)
+        # i.e, if a launch fails and multiple subsequent launches occur for
+        # the same resource_link + attempt_number combo, the ACS only perceives
+        # this as one singular attempt.
+        attempt_number = data['attempt_number']

-    # ACS action to be performed
-    action = data['action']
+        # ACS action to be performed
+        action = data['action']
+    except KeyError as err:
+        error_msg = f'ERROR: required parameter {err} was not found.'
+        return Response(status=status.HTTP_400_BAD_REQUEST, data=error_msg)

    # Only flag ongoing or completed attempts
    VALID_STATUSES = [
@@ -101,14 +102,85 @@
        return Response(status=400)

    if action == 'flag':
+        # NOTE: The flag action is not yet supported.
+        # If implemented, have it modify the exam attempt data (or perhaps another model?)
        success_msg = (
-            f'Flagging exam for user with id {anonymous_user_id} '
+            f'NOTE: The flag action is not yet supported. The following is a placeholder message. '
+            f'Flagging exam attempt for user with id {anonymous_user_id} '
            f'with resource id {resource_id} and attempt number {attempt_number} '
            f'for lti config id {lti_config_id}, status {attempt.status}, exam id {attempt.exam.id}, '
            f'and attempt id {attempt.id}.'
        )
        log.info(success_msg)
-    return Response(status=200)
+
+    # NOTE: The code below is for the 'terminate' action, which is the only action we support currently.
+    # This code and its tests will need to be modified if other ACS actions are implemented.
+    elif action == 'terminate':
+        # Upon receiving a terminate request, the attempt referenced should have its status updated
+        # depending on the reason for termination (reason_code) and the incident_severity (scaling from 0.0 to 1.0).
+        # If the severity is greater than 0.25, then the attempt is marked for secondary review.
+
+        # Get the termination parameters
+        try:
+            reason_code = data['reason_code']
+            incident_time = data['incident_time']
+            severity = data['incident_severity']
+        except KeyError as err:
+            error_msg = f'ERROR: required parameter {err} was not found.'
+            return Response(status=status.HTTP_400_BAD_REQUEST, data=error_msg)
+
+        severity = Decimal(severity)
+        SEVERITY_THRESHOLD = 0.25
+        # Regular submission occurred, but the learner did something
+        # that might be worth marking the attempt for review. Mark the attempt
+        # as requiring review based on the severity level (>0.25 -> needs review)
+        if reason_code == '1':
+            if severity > SEVERITY_THRESHOLD:
+                update_attempt_status(attempt.id, ExamAttemptStatus.second_review_required)
+                success_msg = 'Termination Severity > 0.25, marking exam attempt for secondary review. '
+            else:
+                update_attempt_status(attempt.id, ExamAttemptStatus.verified)
+                success_msg = 'Termination Severity <= 0.25, marking exam attempt as verified. '
+            log.info(success_msg)
+        # Errors outside of the learner's control occurred -> Mark the attempt with status 'error'
+        # NOTE: This currently catches all reason codes that are not '1'. Should this integration
+        # be changed, or if we add another proctoring integration, then we may need to add a more
+        # precise condition here.
+        else:
+            update_attempt_status(attempt.id, ExamAttemptStatus.error)
+            success_msg = 'Marked exam attempt as error. '
+
+        success_msg += (
+            f'Terminating exam attempt for user with id {anonymous_user_id} '
+            f'with resource id {resource_id} and attempt number {attempt_number} '
+            f'for lti config id {lti_config_id}, status {attempt.status}, exam id {attempt.exam.id}, '
+            f'and attempt id {attempt.id}. '
+            f'Reason code for the ACS request is: {reason_code}'
+        )
+        log.info(success_msg)
+
+        # Create a record of the ACS result
+        AssessmentControlResult.objects.create(
+            attempt=attempt,
+            action_type=action,
+            incident_time=incident_time,
+            severity=severity,
+            reason_code=reason_code,
+        )
+        log.info(
+            f'Created AssessmentControlResult for attempt with id {attempt.id}, '
+            f'action_type {action}, incident_time {incident_time}, severity {severity}, '
+            f'and reason_code {reason_code}.'
+        )
+
+    # Base case for unsupported or invalid action types
+    else:
+        log.info(f'Received ACS request containing invalid or unsupported action: {action}')
+        return Response(status=status.HTTP_400_BAD_REQUEST)
+
+    # Send back the status of terminated for the terminate action per LTI specs
+    # for ACS Response data: http://www.imsglobal.org/spec/proctoring/v1p0#h.r9n0nket2gul
+    return Response(data={'status': 'terminated'}, status=status.HTTP_200_OK)


@api_view(['GET'])
@@ -172,7 +244,7 @@ def start_proctoring(request, attempt_id):
        attempt_number=attempt.attempt_number,
        start_assessment_url=proctoring_start_assessment_url,
        assessment_control_url=assessment_control_url,
-        assessment_control_actions=['flagRequest'],
+        assessment_control_actions=['terminate'],
    )

    launch_data = Lti1p3LaunchData(
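
Aside (a minimal sketch, not part of the patch): the terminate branch above boils down to a small mapping from reason_code and incident_severity to an exam attempt status. Restated as a standalone function for clarity, using plain strings in place of the ExamAttemptStatus constants:

    from decimal import Decimal

    SEVERITY_THRESHOLD = Decimal('0.25')

    def terminate_outcome(reason_code, incident_severity):
        """Mirror of the acs view's terminate logic; returns the resulting attempt status."""
        if reason_code == '1':
            # Normal submission; severity decides whether a human needs to review the attempt.
            if Decimal(incident_severity) > SEVERITY_THRESHOLD:
                return 'second_review_required'
            return 'verified'
        # Any other reason code is treated as an error outside the learner's control.
        return 'error'

    # Examples matching the ddt cases in test_acs_terminate:
    assert terminate_outcome('1', '0.26') == 'second_review_required'
    assert terminate_outcome('1', '0.25') == 'verified'
    assert terminate_outcome('0', '1.0') == 'error'
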
diff --git a/requirements/base.txt b/requirements/base.txt
index 834cf536..5b25bf4a 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -140,7 +140,7 @@ jsonfield==3.1.0
    # via lti-consumer-xblock
lazy==1.5
    # via lti-consumer-xblock
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via -r requirements/base.in
lxml==4.9.3
    # via
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 2e35e9eb..cc91ce13 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -296,7 +296,7 @@ lazy-object-proxy==1.9.0
    # via
    #   -r requirements/validation.txt
    #   astroid
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via -r requirements/validation.txt
lxml==4.9.3
    # via
diff --git a/requirements/doc.txt b/requirements/doc.txt
index 2c79e1cb..f35fcc1e 100644
--- a/requirements/doc.txt
+++ b/requirements/doc.txt
@@ -290,7 +290,7 @@ lazy-object-proxy==1.9.0
    # via
    #   -r requirements/test.txt
    #   astroid
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via -r requirements/test.txt
lxml==4.9.3
    # via
diff --git a/requirements/production.txt b/requirements/production.txt
index 17196c9b..c069e16c 100644
--- a/requirements/production.txt
+++ b/requirements/production.txt
@@ -193,7 +193,7 @@ lazy==1.5
    # via
    #   -r requirements/base.txt
    #   lti-consumer-xblock
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via -r requirements/base.txt
lxml==4.9.3
    # via
diff --git a/requirements/quality.txt b/requirements/quality.txt
index 0e1e2e43..5847fffe 100644
--- a/requirements/quality.txt
+++ b/requirements/quality.txt
@@ -275,7 +275,7 @@ lazy-object-proxy==1.9.0
    # via
    #   -r requirements/test.txt
    #   astroid
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via -r requirements/test.txt
lxml==4.9.3
    # via
diff --git a/requirements/test.txt b/requirements/test.txt
index baf1e98c..c26ebe3c 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -235,7 +235,7 @@ lazy==1.5
    #   lti-consumer-xblock
lazy-object-proxy==1.9.0
    # via astroid
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via -r requirements/base.txt
lxml==4.9.3
    # via
diff --git a/requirements/validation.txt b/requirements/validation.txt
index 806fd474..4f5a1f07 100644
--- a/requirements/validation.txt
+++ b/requirements/validation.txt
@@ -355,7 +355,7 @@ lazy-object-proxy==1.9.0
    #   -r requirements/quality.txt
    #   -r requirements/test.txt
    #   astroid
-lti-consumer-xblock==9.5.5
+lti-consumer-xblock==9.5.7
    # via
    #   -r requirements/quality.txt
    #   -r requirements/test.txt
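
Aside (hypothetical usage sketch, not part of the patch): after a successful terminate request the endpoint responds with {'status': 'terminated'} and records an AssessmentControlResult row. A quick check from a Django shell might look like the following; the attempt id is a placeholder, and the field names come from the objects.create() call in the acs view above.

    from edx_exams.apps.core.models import AssessmentControlResult, ExamAttempt

    attempt = ExamAttempt.objects.get(id=1)  # placeholder id
    acs_result = AssessmentControlResult.objects.get(attempt=attempt)

    # Fields recorded by the acs view for a terminate request
    print(acs_result.action_type, acs_result.reason_code, acs_result.severity, acs_result.incident_time)

    # Status set by update_attempt_status: 'verified', 'second_review_required', or 'error'
    print(attempt.status)
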