Submission lock tests (#1716)
* test: add tests for submission locking

* refactor: update submission lock for timezone support

* feat: allow a user to reacquire a submission lock

* feat: add submission validation to submission lock

* feat: add xblock test mode for returning response

In a test using the XBlockHandlerTestCaseMixin, running self.request
with response_format='response' returns the entire response object
instead of just the body. This is useful for asserting on status codes.
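
A minimal sketch of that test mode, assuming an XBlockHandlerTestCase-style test; the scenario path, user, and assertions are illustrative only (the real usage added by this commit appears in test_list_staff_workflows.py below):

```python
import json

from openassessment.xblock.test.base import XBlockHandlerTestCase, scenario


class ResponseFormatSketch(XBlockHandlerTestCase):
    """Illustrative only: shows what response_format='response' hands back."""

    @scenario('data/basic_scenario.xml', user_id='Bob')  # scenario path and user are assumptions
    def test_full_response_object(self, xblock):
        # Without staff access or a submission_id, the handler answers with a JSON error,
        # but the point here is the return value: a full response object, not just the body.
        response = self.request(
            xblock, 'check_submission_lock', json.dumps({}), response_format='response'
        )

        self.assertTrue(hasattr(response, 'status_code'))
        self.assertIn('error', json.loads(response.body.decode('utf-8')))
```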

Co-authored-by: jansenk <jkantor@edx.org>
nsprenkle and jansenk committed Nov 1, 2021
1 parent 86272cb commit c6596db
Showing 10 changed files with 304 additions and 57 deletions.
7 changes: 4 additions & 3 deletions openassessment/staffgrader/migrations/0001_initial.py
@@ -1,6 +1,7 @@
# Generated by Django 2.2.24 on 2021-09-20 20:02
# Generated by Django 2.2.24 on 2021-11-01 16:26

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):
@@ -15,9 +16,9 @@ class Migration(migrations.Migration):
name='SubmissionGradingLock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('owner_id', models.CharField(db_index=True, max_length=40)),
('created_at', models.DateTimeField(db_index=True, null=True)),
('submission_uuid', models.CharField(db_index=True, max_length=128, unique=True)),
('owner_id', models.CharField(max_length=40)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'ordering': ['created_at', 'id'],
10 changes: 5 additions & 5 deletions openassessment/staffgrader/models/submission_lock.py
@@ -2,7 +2,6 @@
Models for locking Submissions for exclusive grading.
Part of Enhanced Staff Grader (ESG).
"""
from datetime import datetime
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext as _
@@ -19,8 +18,8 @@ class SubmissionGradingLock(models.Model):

# NOTE - submission_uuid can refer to either the team or individual submission
submission_uuid = models.CharField(max_length=128, db_index=True, unique=True)
owner_id = models.CharField(max_length=40, db_index=True)
created_at = models.DateTimeField(db_index=True, default=datetime.now())
owner_id = models.CharField(max_length=40)
created_at = models.DateTimeField(default=now)

class Meta:
app_label = "staffgrader"
@@ -62,10 +61,11 @@ def claim_submission_lock(cls, submission_uuid, user_id):
current_lock = cls.get_submission_lock(submission_uuid)

# If there's already an active lock, raise an error
if current_lock and current_lock.is_active:
# Unless the lock owner is trying to reacquire a lock, which is allowed
if current_lock and current_lock.is_active and current_lock.owner_id != user_id:
raise SubmissionLockContestedError(_("Submission already locked"))

# Otherwise, delete the lock. This is needed so we don't violate the unique submisison_uuid constraint
# Otherwise, delete the lock. This is needed so we don't violate the unique submission_uuid constraint
if current_lock:
current_lock.delete()

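Two behavioral details fall out of this hunk: created_at now defaults to the now callable (evaluated per row and timezone-aware) rather than a datetime.now() value computed once at import time, and a grader who already owns an active lock can reacquire it instead of hitting the contested-lock error. A minimal usage sketch under those assumptions; the import path for the error class is a guess, not taken from the diff:

```python
# Illustrative sketch only (not part of the commit); the error import path is assumed.
from openassessment.staffgrader.models.submission_lock import SubmissionGradingLock
from openassessment.staffgrader.errors.submission_lock import SubmissionLockContestedError


def try_claim(submission_uuid, grader_id):
    """Return a grading lock held by grader_id, or None if another grader holds it."""
    try:
        # Reacquisition by the same owner is allowed: the existing row is deleted
        # (so the unique submission_uuid constraint holds) and a fresh lock is
        # returned with a new, timezone-aware created_at.
        return SubmissionGradingLock.claim_submission_lock(submission_uuid, grader_id)
    except SubmissionLockContestedError:
        return None
```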
54 changes: 38 additions & 16 deletions openassessment/staffgrader/staff_grader_mixin.py
@@ -8,8 +8,8 @@
from django.db.models.fields import CharField
from xblock.core import XBlock
from xblock.exceptions import JsonHandlerError
from submissions import api as sub_api
from submissions.api import get_student_ids_by_submission_uuid
from submissions.api import get_student_ids_by_submission_uuid, get_submission
from submissions.errors import SubmissionInternalError, SubmissionNotFoundError, SubmissionRequestError, SubmissionError

from openassessment.assessment.models.base import Assessment, AssessmentPart
from openassessment.assessment.models.staff import StaffWorkflow
@@ -26,14 +26,36 @@
log = logging.getLogger(__name__)


def require_submission_uuid(handler):
@wraps(handler)
def wrapped_handler(self, data, suffix=""): # pylint: disable=unused-argument
submission_uuid = data.get('submission_id', None)
if not submission_uuid:
raise JsonHandlerError(400, "Body must contain a submission_id")
return handler(self, submission_uuid, data, suffix=suffix)
return wrapped_handler
def require_submission_uuid(validate=True):
"""
Unpacks and passes submission_id from request to handler function.
params:
- validate: Whether to check the submissions service to confirm this is a real submission UUID. Default True
Raises:
- 400 if the submission_id was not provided or was incorrectly formatted
- 404 if the submission_id wasn't found in submissions
- 500 for errors with submissions or general exceptions
"""
def decorator(handler):
@wraps(handler)
def wrapped_handler(self, data, suffix=""): # pylint: disable=unused-argument
submission_uuid = data.get('submission_id', None)
if not submission_uuid:
raise JsonHandlerError(400, "Body must contain a submission_id")
if validate:
try:
get_submission(submission_uuid)
except SubmissionNotFoundError as exc:
raise JsonHandlerError(404, "Submission not found") from exc
except SubmissionRequestError as exc:
raise JsonHandlerError(400, "Bad submission_uuid provided") from exc
except (SubmissionInternalError, Exception) as exc:
raise JsonHandlerError(500, "Internal error getting submission info") from exc
return handler(self, submission_uuid, data, suffix=suffix)
return wrapped_handler
return decorator
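
Since require_submission_uuid is now a decorator factory (it takes validate and returns the real decorator), call sites must invoke it, as in @require_submission_uuid(validate=True), and the tests below wrap handlers with require_submission_uuid()(handler). A stripped-down sketch of the pattern, using hypothetical names not tied to this codebase:

```python
from functools import wraps


def require_key(validate=True):
    """Decorator factory: calling it returns the decorator that wraps the handler."""
    def decorator(handler):
        @wraps(handler)
        def wrapped(self, data, suffix=""):
            key = data.get("key")
            if key is None:
                raise ValueError("body must contain a key")
            if validate and key not in self.known_keys:
                raise LookupError("unknown key")
            return handler(self, key, data, suffix=suffix)
        return wrapped
    return decorator


class Example:
    known_keys = {"abc"}

    # Note the parentheses: the factory runs at class-definition time and returns the decorator.
    @require_key(validate=False)
    def handler(self, key, data, suffix=""):
        return key
```

With validate=False, Example().handler({"key": "anything"}) returns "anything" because the lookup check is skipped, which mirrors how the lock handlers opt out of the submissions lookup below.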


class StaffGraderMixin:
@@ -44,7 +66,7 @@ class StaffGraderMixin:

@XBlock.json_handler
@require_course_staff("STUDENT_GRADE")
@require_submission_uuid
@require_submission_uuid(validate=False)
def check_submission_lock(self, submission_uuid, data, suffix=""): # pylint: disable=unused-argument
submission_lock = SubmissionGradingLock.get_submission_lock(submission_uuid)
if submission_lock:
@@ -54,7 +76,7 @@ def check_submission_lock(self, submission_uuid, data, suffix=""): # pylint: di

@XBlock.json_handler
@require_course_staff("STUDENT_GRADE")
@require_submission_uuid
@require_submission_uuid(validate=True)
def claim_submission_lock(self, submission_uuid, data, suffix=''): # pylint: disable=unused-argument
anonymous_user_id = self.get_anonymous_user_id_from_xmodule_runtime()
try:
@@ -65,7 +87,7 @@ def claim_submission_lock(self, submission_uuid, data, suffix=''): # pylint: di

@XBlock.json_handler
@require_course_staff("STUDENT_GRADE")
@require_submission_uuid
@require_submission_uuid(validate=False)
def delete_submission_lock(self, submission_uuid, data, suffix=''): # pylint: disable=unused-argument
anonymous_user_id = self.get_anonymous_user_id_from_xmodule_runtime()
try:
@@ -235,7 +257,7 @@ def bulk_deep_fetch_assessments(self, assessment_ids):

@XBlock.json_handler
@require_course_staff("STUDENT_GRADE")
@require_submission_uuid
@require_submission_uuid(validate=True)
def get_submission_and_assessment_info(self, submission_uuid, _, suffix=''): # pylint: disable=unused-argument
# TODO: Checks for if the submission we're given actually has a Workflow
submission_info = self.get_submission_info(submission_uuid)
@@ -259,9 +281,9 @@ def get_submission_info(self, submission_uuid):
}
"""
try:
submission = sub_api.get_submission(submission_uuid)
submission = get_submission(submission_uuid)
answer = OraSubmissionAnswerFactory.parse_submission_raw_answer(submission.get('answer'))
except sub_api.SubmissionError as err:
except SubmissionError as err:
raise JsonHandlerError(404, str(err)) from err
except VersionNotFoundException as err:
raise JsonHandlerError(500, str(err)) from err
Empty file.
61 changes: 59 additions & 2 deletions openassessment/staffgrader/tests/test_decorators.py
@@ -1,17 +1,25 @@
"""
Tests for decorators used in Staff Grading
"""
from unittest import TestCase
from mock import Mock
from unittest.mock import patch
from uuid import uuid4

from mock import Mock
from submissions.errors import SubmissionInternalError, SubmissionNotFoundError, SubmissionRequestError
from xblock.exceptions import JsonHandlerError
from openassessment.staffgrader.staff_grader_mixin import require_submission_uuid


class RequireSubmissionUUIDTest(TestCase):
valid_data = {"submission_id": uuid4()}

def setUp(self):
super().setUp()
self.mock_self = Mock()
self.mock_function = Mock()
self.mock_suffix = Mock()
self.wrapped_function = require_submission_uuid(self.mock_function)
self.wrapped_function = require_submission_uuid()(self.mock_function)

def test_no_submission_uuid(self):
with self.assertRaises(JsonHandlerError) as error_context:
@@ -22,6 +30,8 @@ def test_no_submission_uuid(self):
self.assertEqual(error_context.exception.message, 'Body must contain a submission_id')

def test_arguments_passed(self):
self.wrapped_function = require_submission_uuid(validate=False)(self.mock_function)

data = {str(i): str((i * 2) - 1) for i in range(10)}
submission_uuid = uuid4()
data['submission_id'] = submission_uuid
Expand All @@ -30,3 +40,50 @@ def test_arguments_passed(self):

self.assertEqual(result, self.mock_function.return_value)
self.mock_function.assert_called_once_with(self.mock_self, submission_uuid, data, suffix=self.mock_suffix)

@patch('openassessment.staffgrader.staff_grader_mixin.get_submission')
def test_validate_submission(self, mock_get_submission): # pylint: disable=unused-argument
mock_get_submission.return_value = {}
submission_uuid = self.valid_data['submission_id']
result = self.wrapped_function(self.mock_self, self.valid_data, suffix=self.mock_suffix)

self.assertEqual(result, self.mock_function.return_value)
self.mock_function.assert_called_once_with(
self.mock_self,
submission_uuid,
self.valid_data,
suffix=self.mock_suffix,
)

@patch('openassessment.staffgrader.staff_grader_mixin.get_submission')
def test_validate_submission_not_found(self, mock_get_submission):
mock_get_submission.side_effect = SubmissionNotFoundError

with self.assertRaises(JsonHandlerError) as error_context:
self.wrapped_function(self.mock_self, self.valid_data)

self.mock_function.assert_not_called()
self.assertEqual(error_context.exception.status_code, 404)
self.assertEqual(error_context.exception.message, 'Submission not found')

@patch('openassessment.staffgrader.staff_grader_mixin.get_submission')
def test_validate_bad_submission_id(self, mock_get_submission):
mock_get_submission.side_effect = SubmissionRequestError

with self.assertRaises(JsonHandlerError) as error_context:
self.wrapped_function(self.mock_self, self.valid_data)

self.mock_function.assert_not_called()
self.assertEqual(error_context.exception.status_code, 400)
self.assertEqual(error_context.exception.message, 'Bad submission_uuid provided')

@patch('openassessment.staffgrader.staff_grader_mixin.get_submission')
def test_validate_submission_error(self, mock_get_submission):
mock_get_submission.side_effect = SubmissionInternalError

with self.assertRaises(JsonHandlerError) as error_context:
self.wrapped_function(self.mock_self, self.valid_data)

self.mock_function.assert_not_called()
self.assertEqual(error_context.exception.status_code, 500)
self.assertEqual(error_context.exception.message, 'Internal error getting submission info')
@@ -1,19 +1,19 @@
""" Tests for the get_submission_and_assessment_info endpoint """
from contextlib import contextmanager
from typing import OrderedDict
from mock import patch, Mock, MagicMock
from uuid import uuid4
import json
import random

from mock import patch, Mock, MagicMock
from submissions import api as sub_api
from xblock.exceptions import JsonHandlerError

from openassessment.xblock.test.base import XBlockHandlerTestCase, scenario
from openassessment.data import VersionNotFoundException
from openassessment.assessment.models.staff import StaffWorkflow
from openassessment.workflow import api as workflow_api
from submissions import api as sub_api
from xblock.exceptions import JsonHandlerError
from openassessment.tests.factories import (
AssessmentFactory, AssessmentPartFactory, CriterionOptionFactory, CriterionFactory, StaffWorkflowFactory
AssessmentFactory, AssessmentPartFactory, CriterionOptionFactory, CriterionFactory
)


@@ -39,6 +39,15 @@ def _mock_get_submission_info(self, xblock, **kwargs):
with patch.object(xblock, 'get_submission_info', **kwargs) as mocked_get:
yield mocked_get

@contextmanager
def _mock_get_submission(self, **kwargs):
""" Context manager to mock the fetching of a submission """
with patch(
'openassessment.staffgrader.staff_grader_mixin.get_submission',
**kwargs
) as mock_get:
yield mock_get

@contextmanager
def _mock_get_assessment_info(self, xblock, **kwargs):
""" Context manager to mock get_assessment_info """
@@ -55,7 +64,7 @@ def set_staff_user(self, xblock, staff_id):
xblock.xmodule_runtime.anonymous_student_id = staff_id
xblock.get_student_item_dict = Mock(return_value=self._student_item_dict(staff_id))

def request(self, xblock, submission_id, json_format=True):
def request(self, xblock, submission_id, json_format=True): # pylint: disable=arguments-differ
""" Helper to candle calling the `get_submission_and_assessment_info` handler """
if submission_id is None:
payload = {}
@@ -142,7 +151,10 @@ def test_handler(self, xblock):

with self._mock_get_submission_info(xblock, return_value='getSubmissionInfoResult') as mocked_get_submission:
with self._mock_get_assessment_info(xblock, return_value='getAssessmentResult') as mocked_get_assessment:
resp = self.request(xblock, submission_uuid)
# To mock the check in the validating decorator
with self._mock_get_submission(return_value=True):
resp = self.request(xblock, submission_uuid)

mocked_get_submission.assert_called_once_with(submission_uuid)
mocked_get_assessment.assert_called_once_with(submission_uuid)
self.assertDictEqual(
@@ -252,15 +264,6 @@ def test_handler_integration__assessment(self, xblock):
class GetSubmissionInfoTests(GetSubmissionAndAssessmentInfoBase):
""" Tests for the get_submission_info method """

@contextmanager
def _mock_get_submission(self, **kwargs):
""" Context manager to mock the fetching of a submission """
with patch(
'openassessment.staffgrader.staff_grader_mixin.sub_api.get_submission',
**kwargs
) as mock_get:
yield mock_get

@contextmanager
def _mock_parse_submission_raw_answer(self, **kwargs):
""" Context manager to mock the parsing of a raw answer into an nswer object """
@@ -292,7 +295,7 @@ def test_answer_version_unknown(self, xblock):
""" What happens when the raw answer we look up doesn't parse correctly? """
submission_uuid = Mock()
mock_submission = Mock()
mock_exception = VersionNotFoundException(f"No version found!!!!11")
mock_exception = VersionNotFoundException("No version found!!!!11")
with self._mock_get_submission(return_value=mock_submission) as mock_get:
with self._mock_parse_submission_raw_answer(side_effect=mock_exception) as mock_parse:
with self.assertRaises(JsonHandlerError) as error_context:
@@ -351,7 +354,7 @@ def test_get_submission_info__integration(self, xblock):
'files_names': ['filename-1', 'filename-2', 'filename-3'],
'files_sizes': [200, 1500, 3000],
}
submission, new_student_item = self._create_student_and_submission(student_id, test_answer)
submission, _ = self._create_student_and_submission(student_id, test_answer)

with self._mock_get_url_by_file_key(xblock):
submission_info = xblock.get_submission_info(submission['uuid'])
14 changes: 9 additions & 5 deletions openassessment/staffgrader/tests/test_list_staff_workflows.py
@@ -1,8 +1,8 @@
""" Tests for the Staff Workflow Listing view in the staff_grader_mixin"""
""" Tests for the Staff Workflow Listing view in the staff_grader_mixin """

from collections import namedtuple
from contextlib import contextmanager
from datetime import datetime, time, timedelta, timezone
from datetime import datetime, timedelta, timezone
import json
import random

@@ -58,6 +58,7 @@ def setUp(self):
@classmethod
@freeze_time(SUBMITTED_DATE)
def setUpTestData(cls):
super().setUpTestData()
cls.course_id = STUDENT_ITEM['course_id']
# Create four TestUser learners with submissions.
cls.students = [
@@ -331,9 +332,12 @@ def test_not_staff(self, xblock):
def test_teams(self, xblock):
self.set_staff_user(xblock)
with patch.object(xblock, 'is_team_assignment', return_value=True):
response = self.request(xblock, 'list_staff_workflows', "{}", response_format='json')
response = self.request(xblock, 'list_staff_workflows', "{}", response_format='response')
response_body = json.loads(response.body.decode('utf-8'))

self.assertEqual(response.status_code, 400)
self.assertDictEqual(
response,
response_body,
{"error": "Team Submissions not currently supported"}
)

@@ -441,7 +445,7 @@ def test_bulk_fetch_annotated_staff_workflows(self, xblock, set_up_grades, set_u
self.set_staff_user(xblock)

with self.assertNumQueries(1):
annotated_workflows = xblock._bulk_fetch_annotated_staff_workflows()
annotated_workflows = xblock._bulk_fetch_annotated_staff_workflows() # pylint: disable=protected-access
self.assertEqual(len(annotated_workflows), 4)

# This is a bit verbose but I thought for a unit test it would be best to be explicit about test expectations
(Diffs for the remaining changed files are not shown.)