Skip to content

Commit

Permalink
Merge a64c11a into 7fe4fa6
Browse files Browse the repository at this point in the history
  • Loading branch information
pomegranited committed Jul 28, 2016
2 parents 7fe4fa6 + a64c11a commit 67ad688
Show file tree
Hide file tree
Showing 10 changed files with 379 additions and 41 deletions.
2 changes: 0 additions & 2 deletions analytics_data_api/constants/learner.py
Original file line number Diff line number Diff line change
@@ -1,3 +1 @@
LEARNER_API_DEFAULT_LIST_PAGE_SIZE = 25

SEGMENTS = ["highly_engaged", "disengaging", "struggling", "inactive", "unenrolled"]
22 changes: 22 additions & 0 deletions analytics_data_api/renderers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
"""
API renderers common to all versions of the API.
"""
from rest_framework_csv.renderers import CSVRenderer


class PaginatedCsvRenderer(CSVRenderer):
    """
    Render CSV data using just the results array.

    Use with PaginatedHeadersMixin to preserve the pagination links in the response header.
    """
    # Key in the paginated response dict that holds the list of row objects.
    results_field = 'results'
    media_type = 'text/csv'

    def render(self, data, *args, **kwargs):
        """
        Replace the rendered data with just what is in the results_field.
        """
        if isinstance(data, list):
            results = data
        else:
            # Paginated responses wrap the rows in a dict; unwrap them before rendering.
            results = data.get(self.results_field, [])
        return super(PaginatedCsvRenderer, self).render(results, *args, **kwargs)
37 changes: 36 additions & 1 deletion analytics_data_api/v0/serializers.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from urlparse import urljoin
from django.conf import settings
from django.utils.datastructures import SortedDict
from rest_framework import pagination, serializers

from analytics_data_api.constants import (
Expand Down Expand Up @@ -45,6 +46,40 @@ class ModelSerializerWithCreatedField(serializers.ModelSerializer):
created = serializers.DateTimeField(format=settings.DATETIME_FORMAT)


class DynamicFieldsSerializerMixin(object):
    """
    Allows the `fields` query parameter to determine which fields should be returned in the response.

    Mix into a serializer whose view passes `request` in the serializer context.
    """
    # Separator used to split the `fields` query parameter value into field names.
    fields_sep = ','

    def get_fields(self):
        """
        Filter the list of available fields based on the list of fields passed to the request.

        Unknown field names in the request are silently dropped; requested order is preserved.
        """
        fields = super(DynamicFieldsSerializerMixin, self).get_fields()

        request = self.context.get('request')
        if request:
            # NOTE(review): QUERY_PARAMS is the DRF 2.x spelling (renamed to
            # query_params in DRF 3) -- confirm against the pinned DRF version.
            request_fields = request.QUERY_PARAMS.get('fields')
            if request_fields:
                # Include only fields that are specified in the `fields` argument,
                # in the order they are given.
                request_fields = request_fields.split(self.fields_sep)
                allowed = SortedDict()
                for field_name in request_fields:
                    if field_name in fields:
                        allowed[field_name] = fields[field_name]
                fields = allowed

            # Set the renderer's header attribute to the sorted fields list, if relevant.
            # 'header' is used by the CSVRenderer to decide what order to display fields in.
            renderer = request.accepted_renderer
            if renderer and hasattr(renderer, 'header'):
                renderer.header = fields.keys()

        return fields


class ProblemSerializer(serializers.Serializer):
"""
Serializer for problems.
Expand Down Expand Up @@ -333,7 +368,7 @@ class LastUpdatedSerializer(serializers.Serializer):
last_updated = serializers.DateField(source='date', format=settings.DATE_FORMAT)


class LearnerSerializer(serializers.Serializer, DefaultIfNoneMixin):
class LearnerSerializer(DynamicFieldsSerializerMixin, serializers.Serializer, DefaultIfNoneMixin):
username = serializers.CharField(source='username')
enrollment_mode = serializers.CharField(source='enrollment_mode')
name = serializers.CharField(source='name')
Expand Down
61 changes: 61 additions & 0 deletions analytics_data_api/v0/tests/views/__init__.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,13 @@
import json
import StringIO
import csv

from opaque_keys.edx.keys import CourseKey
from rest_framework import status

from analytics_data_api.v0.tests.utils import flatten


DEMO_COURSE_ID = u'course-v1:edX+DemoX+Demo_2014'


Expand Down Expand Up @@ -36,3 +41,59 @@ def verify_bad_course_id(self, response, course_id='malformed-course-id'):
u"developer_message": u"Course id/key {} malformed.".format(course_id)
}
self.assertDictEqual(json.loads(response.content), expected)


class VerifyCsvResponseMixin(object):
    """Assertion helpers for validating CSV-rendered API responses."""

    def assertCsvResponseIsValid(self, response, expected_filename, expected_data=None, expected_headers=None):
        """
        Assert that ``response`` is a valid CSV attachment.

        Checks status, filename, any ``expected_headers``, and that the body is
        exactly the CSV serialization of ``expected_data`` (or empty when no
        data is expected).
        """
        # Basic response checks: status, content type (only when data is expected), filename.
        self.assertEquals(response.status_code, 200)
        if expected_data:
            self.assertEquals(response['Content-Type'].split(';')[0], 'text/csv')
        self.assertEquals(response['Content-Disposition'], u'attachment; filename={}'.format(expected_filename))

        # Any additional headers the caller wants verified (e.g. the pagination Link header).
        if expected_headers:
            for name, value in expected_headers.iteritems():
                self.assertEquals(response.get(name), value)

        # Body checks: empty body when no data is expected, exact CSV text otherwise.
        if not expected_data:
            self.assertEqual(response.content, '')
            return

        rows = map(flatten, expected_data)

        # The CSV renderer sorts the headers alphabetically.
        columns = sorted(rows[0].keys())

        # Generate the expected CSV output and compare byte-for-byte.
        buff = StringIO.StringIO()
        writer = csv.DictWriter(buff, columns)
        writer.writeheader()
        writer.writerows(rows)
        self.assertEqual(response.content, buff.getvalue())


class VerifyDynamicFieldsMixin(object):
    """Assertion helpers for checking exactly which fields an API response contains."""

    def assertResponseFields(self, response, fields):
        """Dispatch to the CSV or JSON field check based on the response content type."""
        mime = response.get('Content-Type', '').split(';')[0]
        if mime == 'text/csv':
            return self.assertCsvResponseFields(response, fields)
        return self.assertJsonResponseFields(response, fields)

    def assertCsvResponseFields(self, response, fields):
        """Assert the CSV header and every row contain exactly ``fields``, in order."""
        reader = csv.DictReader(StringIO.StringIO(response.content))
        for record in reader:
            self.assertEqual(record.keys(), fields)
        self.assertEqual(reader.fieldnames, fields)

    def assertJsonResponseFields(self, response, fields):
        """Assert every row of the JSON ``results`` list contains exactly ``fields``."""
        payload = json.loads(response.content)
        rows = payload.get('results')
        self.assertIsNotNone(rows)
        for record in rows:
            self.assertEquals(fields, record.keys())
25 changes: 3 additions & 22 deletions analytics_data_api/v0/tests/views/test_courses.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@
# change for versions greater than 1.0.0. Tests target a specific version of the API, additional tests should be added
# for subsequent versions if there are breaking changes introduced in those versions.

import StringIO
import csv
import datetime
from itertools import groupby
import urllib
Expand All @@ -17,8 +15,7 @@
from analytics_data_api.v0 import models
from analytics_data_api.constants import country, enrollment_modes, genders
from analytics_data_api.v0.models import CourseActivityWeekly
from analytics_data_api.v0.tests.utils import flatten
from analytics_data_api.v0.tests.views import DemoCourseMixin, DEMO_COURSE_ID
from analytics_data_api.v0.tests.views import DemoCourseMixin, VerifyCsvResponseMixin, DEMO_COURSE_ID
from analyticsdataserver.tests import TestCaseWithAuthentication


Expand All @@ -37,7 +34,7 @@ def test_default_fill(self):


# pylint: disable=no-member
class CourseViewTestCaseMixin(DemoCourseMixin):
class CourseViewTestCaseMixin(DemoCourseMixin, VerifyCsvResponseMixin):
model = None
api_root_path = '/api/v0/'
path = None
Expand Down Expand Up @@ -92,24 +89,8 @@ def assertCSVIsValid(self, course_id, filename):
csv_content_type = 'text/csv'
response = self.authenticated_get(path, HTTP_ACCEPT=csv_content_type)

# Validate the basic response status, content type, and filename
self.assertEquals(response.status_code, 200)
self.assertEquals(response['Content-Type'].split(';')[0], csv_content_type)
self.assertEquals(response['Content-Disposition'], u'attachment; filename={}'.format(filename))

# Validate the actual data
data = self.format_as_response(*self.get_latest_data(course_id=course_id))
data = map(flatten, data)

# The CSV renderer sorts the headers alphabetically
fieldnames = sorted(data[0].keys())

# Generate the expected CSV output
expected = StringIO.StringIO()
writer = csv.DictWriter(expected, fieldnames)
writer.writeheader()
writer.writerows(data)
self.assertEqual(response.content, expected.getvalue())
self.assertCsvResponseIsValid(response, filename, data)

def test_get_csv(self):
""" Verify the endpoint returns data that has been properly converted to CSV. """
Expand Down
154 changes: 152 additions & 2 deletions analytics_data_api/v0/tests/views/test_learners.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,15 @@
from analyticsdataserver.tests import TestCaseWithAuthentication
from analytics_data_api.constants import engagement_events
from analytics_data_api.v0.models import ModuleEngagementMetricRanges
from analytics_data_api.v0.tests.views import DemoCourseMixin, VerifyCourseIdMixin
from analytics_data_api.v0.views import PaginatedHeadersMixin
from analytics_data_api.v0.tests.views import (
DemoCourseMixin, VerifyCourseIdMixin, VerifyCsvResponseMixin, VerifyDynamicFieldsMixin,
)


class LearnerAPITestMixin(object):
csv_filename_slug = 'learners'

"""Manages an elasticsearch index for testing the learner API."""
def setUp(self):
"""Creates the index and defines a mapping."""
Expand Down Expand Up @@ -184,7 +189,8 @@ def test_bad_course_id(self):


@ddt.ddt
class LearnerListTests(LearnerAPITestMixin, VerifyCourseIdMixin, TestCaseWithAuthentication):
class LearnerListTests(LearnerAPITestMixin, VerifyCourseIdMixin,
VerifyDynamicFieldsMixin, TestCaseWithAuthentication):
"""Tests for the learner list endpoint."""
def setUp(self):
super(LearnerListTests, self).setUp()
Expand Down Expand Up @@ -415,6 +421,34 @@ def test_pagination(self):
)
self.assert_learners_returned(response, [{'username': 'e'}])

@ddt.data(
# Note that "last_updated" always gets reported, even with fields filter.
(['username', 'cohort', 'email'],
['username', 'cohort', 'last_updated', 'email'],),
# valid fields interpersed with invalid fields
(['foo', 'username', 'last_updated', 'bar', 'email', 'name', 'baz'],
['username', 'last_updated', 'email', 'name']),
)
@ddt.unpack
def test_json_fields(self, fields, valid_fields):
""" Verify the endpoint returns only the valid requested fields, in the requested order. """

# Create learners, using a cohort name with a comma, to test escaping.
usernames = ['victor', 'olga', 'gabe', ]
commaCohort = 'Lions, Tigers, & Bears'
self.create_learners([{'username': username, 'course_id': self.course_id, 'cohort': commaCohort}
for username in usernames])

# Request list of fields, deliberately out of alphabetical order
if not valid_fields:
valid_fields = fields

# Render JSON with given fields list
response = self._get(self.course_id, fields=','.join(fields))

# Check that response contains the valid fields
self.assertResponseFields(response, valid_fields)

# Error cases
@ddt.data(
({}, 'course_not_specified'),
Expand All @@ -439,6 +473,122 @@ def test_bad_request(self, parameters, expected_error_code):
self.assertEqual(json.loads(response.content)['error_code'], expected_error_code)


@ddt.ddt
class LearnerCsvListTests(LearnerAPITestMixin, VerifyCourseIdMixin,
                          VerifyCsvResponseMixin, VerifyDynamicFieldsMixin,
                          TestCaseWithAuthentication):
    """Tests for the learner list CSV endpoint."""
    def setUp(self):
        # Fixed course id; the CSV filename is derived from it by the view.
        super(LearnerCsvListTests, self).setUp()
        self.course_id = 'edX/DemoX/Demo_Course'
        self.course_csv_filename = 'edX-DemoX-Demo_Course--learners.csv'
        self.create_update_index('2015-09-28')
        self.path = '/api/v0/learners/'

    def test_empty_csv(self):
        """ Verify the endpoint returns data that has been properly converted to CSV. """
        response = self.authenticated_get(
            self.path,
            dict(course_id=self.course_id),
            True,
            HTTP_ACCEPT='text/csv'
        )
        # No learners indexed for this course yet: expect an empty body and no Link header.
        self.assertCsvResponseIsValid(response, self.course_csv_filename, [], {'Link': None})

    def test_csv_pagination(self):
        """ Verify the endpoint returns properly paginated CSV data"""

        # Create learners, using a cohort name with a comma, to test escaping.
        usernames = ['victor', 'olga', 'gabe', ]
        commaCohort = 'Lions, Tigers, & Bears'
        self.create_learners([{'username': username, 'course_id': self.course_id, 'cohort': commaCohort}
                              for username in usernames])

        # Set last_updated index date
        last_updated = '2015-09-28'
        self.create_update_index(last_updated)

        # Render CSV with one learner per page
        page_size = 1
        prev_page = None
        expected_page_url_template = 'http://testserver/api/v0/learners/?' \
                                     '{course_query}&page={page}&page_size={page_size}'
        # Learners are listed in username order; page N holds the Nth sorted username.
        for idx, username in enumerate(sorted(usernames)):
            page = idx + 1
            response = self.authenticated_get(
                self.path,
                dict(course_id=self.course_id, page=page, page_size=page_size),
                True,
                HTTP_ACCEPT='text/csv'
            )

            # Construct expected content data (one row per page; engagement counts
            # default to zero because no engagement events were indexed).
            expected_data = [{
                "username": username,
                "enrollment_mode": 'honor',
                "name": username,
                "email": "{}@example.com".format(username),
                "account_url": "http://lms-host/{}".format(username),
                "cohort": commaCohort,
                "engagements.problems_attempted": 0,
                "engagements.problems_completed": 0,
                "engagements.videos_viewed": 0,
                "engagements.discussion_contributions": 0,
                "engagements.problem_attempts_per_completed": None,
                "enrollment_date": '2015-01-28',
                "last_updated": last_updated,
            }]

            # Construct expected links header from pagination data:
            # a previous link for every page after the first, a next link for
            # every page before the last.
            prev_url = None
            if prev_page:
                prev_url = expected_page_url_template.format(
                    course_query=urlencode({'course_id': self.course_id}), page=prev_page, page_size=page_size
                )
            next_url = None
            next_page = page + 1
            if next_page <= len(usernames):
                next_url = expected_page_url_template.format(
                    course_query=urlencode({'course_id': self.course_id}), page=next_page, page_size=page_size
                )
            expected_links = PaginatedHeadersMixin.get_paginated_links(dict(next=next_url, previous=prev_url))

            self.assertCsvResponseIsValid(response, self.course_csv_filename, expected_data, {'Link': expected_links})
            prev_page = page

    @ddt.data(
        # fields deliberately out of alphabetical order
        (['username', 'cohort', 'email'], None),
        # valid fields interspersed with invalid fields
        (['foo', 'username', 'bar', 'email', 'name', 'baz'],
         ['username', 'email', 'name']),
    )
    @ddt.unpack
    def test_csv_fields(self, fields, valid_fields):
        """ Verify the endpoint returns only the valid requested fields, in the requested order. """

        # Create learners, using a cohort name with a comma, to test escaping.
        usernames = ['victor', 'olga', 'gabe', ]
        commaCohort = 'Lions, Tigers, & Bears'
        self.create_learners([{'username': username, 'course_id': self.course_id, 'cohort': commaCohort}
                              for username in usernames])

        # Request list of fields, deliberately out of alphabetical order.
        # When valid_fields is None, the requested list is expected back verbatim.
        if not valid_fields:
            valid_fields = fields

        # Render CSV with given fields list
        response = self.authenticated_get(
            self.path,
            dict(course_id=self.course_id, fields=','.join(fields)),
            True,
            HTTP_ACCEPT='text/csv'
        )

        # Check that response contains the valid fields
        self.assertResponseFields(response, valid_fields)


@ddt.ddt
class CourseLearnerMetadataTests(DemoCourseMixin, VerifyCourseIdMixin,
LearnerAPITestMixin, TestCaseWithAuthentication):
Expand Down
Loading

0 comments on commit 67ad688

Please sign in to comment.