Skip to content
This repository was archived by the owner on Mar 15, 2018. It is now read-only.

Commit d3707b5

Browse files
author
Jeff Balogh
committed
bring back .csv for stats (bug 682102)
1 parent 034c3b4 commit d3707b5

File tree

4 files changed

+156
-158
lines changed

4 files changed

+156
-158
lines changed

apps/stats/tests/test_views.py

Lines changed: 91 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -1,7 +1,6 @@
11
# -*- coding: utf-8 -*-
22
import csv
33
import json
4-
from datetime import datetime
54

65
from nose.tools import eq_
76

@@ -294,21 +293,28 @@ def _test_cache_control(self):
294293
'Bad or no cache-control: %r' % response.get('cache-control', ''))
295294

296295

297-
class TestJSON(StatsTest, amo.tests.ESTestCase):
296+
class TestResponses(StatsTest, amo.tests.ESTestCase):
298297
es = True
299298

300299
def setUp(self):
301-
super(TestJSON, self).setUp()
300+
super(TestResponses, self).setUp()
302301
self.index()
303302

303+
def csv_eq(self, response, expected):
304+
# Drop the first 4 lines, which contain the header comment.
305+
content = response.content.splitlines()[4:]
306+
# Strip any extra spaces from the expected content.
307+
expected = [line.strip() for line in expected.splitlines()]
308+
self.assertListEqual(content, expected)
309+
304310
def index(self):
305311
updates = UpdateCount.objects.values_list('id', flat=True)
306312
tasks.index_update_counts(list(updates))
307313
downloads = DownloadCount.objects.values_list('id', flat=True)
308314
tasks.index_download_counts(list(downloads))
309315
self.refresh('update_counts')
310316

311-
def test_usage(self):
317+
def test_usage_json(self):
312318
r = self.get_view_response('stats.usage_series', group='day',
313319
format='json')
314320
eq_(r.status_code, 200)
@@ -317,7 +323,16 @@ def test_usage(self):
317323
{'count': 1000, 'date': '2009-06-01', 'end': '2009-06-01'},
318324
])
319325

320-
def test_usage_by_app(self):
326+
def test_usage_csv(self):
327+
r = self.get_view_response('stats.usage_series', group='day',
328+
format='csv')
329+
eq_(r.status_code, 200)
330+
self.csv_eq(r,
331+
"""date,count
332+
2009-06-02,1500
333+
2009-06-01,1000""")
334+
335+
def test_usage_by_app_json(self):
321336
r = self.get_view_response('stats.apps_series', group='day',
322337
format='json')
323338
eq_(r.status_code, 200)
@@ -340,7 +355,15 @@ def test_usage_by_app(self):
340355
}
341356
])
342357

343-
def test_usage_by_locale(self):
358+
def test_usage_by_app_csv(self):
359+
r = self.get_view_response('stats.apps_series', group='day',
360+
format='csv')
361+
eq_(r.status_code, 200)
362+
self.csv_eq(r, """date,count,{ec8030f7-c20a-464f-9b0e-13a3a9e97384}
363+
2009-06-02,1500,{u'4.0': 1500}
364+
2009-06-01,1000,{u'4.0': 1000}""")
365+
366+
def test_usage_by_locale_json(self):
344367
r = self.get_view_response('stats.locales_series', group='day',
345368
format='json')
346369
eq_(r.status_code, 200)
@@ -365,7 +388,15 @@ def test_usage_by_locale(self):
365388
}
366389
])
367390

368-
def test_usage_by_os(self):
391+
def test_usage_by_locale_csv(self):
392+
r = self.get_view_response('stats.locales_series', group='day',
393+
format='csv')
394+
eq_(r.status_code, 200)
395+
self.csv_eq(r, """date,count,English (US) (en-us),Ελληνικά (el)
396+
2009-06-02,1500,300,400
397+
2009-06-01,1000,300,400""")
398+
399+
def test_usage_by_os_json(self):
369400
r = self.get_view_response('stats.os_series', group='day',
370401
format='json')
371402
eq_(r.status_code, 200)
@@ -390,7 +421,12 @@ def test_usage_by_os(self):
390421
}
391422
])
392423

393-
def test_usage_by_version(self):
424+
def test_usage_by_os_csv(self):
425+
r = self.get_view_response('stats.os_series', group='day',
426+
format='csv')
427+
eq_(r.status_code, 200)
428+
429+
def test_usage_by_version_json(self):
394430
r = self.get_view_response('stats.versions_series', group='day',
395431
format='json')
396432
eq_(r.status_code, 200)
@@ -415,7 +451,15 @@ def test_usage_by_version(self):
415451
}
416452
])
417453

418-
def test_usage_by_status(self):
454+
def test_usage_by_version_csv(self):
455+
r = self.get_view_response('stats.versions_series', group='day',
456+
format='csv')
457+
eq_(r.status_code, 200)
458+
self.csv_eq(r, """date,count,2.0,1.0
459+
2009-06-02,1500,950,550
460+
2009-06-01,1000,800,200""")
461+
462+
def test_usage_by_status_json(self):
419463
r = self.get_view_response('stats.statuses_series', group='day',
420464
format='json')
421465
eq_(r.status_code, 200)
@@ -440,6 +484,14 @@ def test_usage_by_status(self):
440484
}
441485
])
442486

487+
def test_usage_by_status_csv(self):
488+
r = self.get_view_response('stats.statuses_series', group='day',
489+
format='csv')
490+
eq_(r.status_code, 200)
491+
self.csv_eq(r, """date,count,userEnabled,userDisabled
492+
2009-06-02,1500,1370,130
493+
2009-06-01,1000,950,50""")
494+
443495
def test_overview(self):
444496
r = self.get_view_response('stats.overview_series', group='day',
445497
format='json')
@@ -523,8 +575,7 @@ def test_overview(self):
523575
{'downloads': 0, 'updates': 0})
524576
next_actual = next(actual)
525577

526-
527-
def test_downloads(self):
578+
def test_downloads_json(self):
528579
r = self.get_view_response('stats.downloads_series', group='day',
529580
format='json')
530581
eq_(r.status_code, 200)
@@ -539,7 +590,21 @@ def test_downloads(self):
539590
{"count": 10, "date": "2009-06-01", "end": "2009-06-01"},
540591
])
541592

542-
def test_downloads_sources(self):
593+
def test_downloads_csv(self):
594+
r = self.get_view_response('stats.downloads_series', group='day',
595+
format='csv')
596+
eq_(r.status_code, 200)
597+
self.csv_eq(r, """date,count
598+
2009-09-03,10
599+
2009-08-03,10
600+
2009-07-03,10
601+
2009-06-28,10
602+
2009-06-20,10
603+
2009-06-12,10
604+
2009-06-07,10
605+
2009-06-01,10""")
606+
607+
def test_downloads_sources_json(self):
543608
r = self.get_view_response('stats.sources_series', group='day',
544609
format='json')
545610
eq_(r.status_code, 200)
@@ -585,3 +650,17 @@ def test_downloads_sources(self):
585650
"data": {"api": 2, "search": 3}
586651
}
587652
])
653+
654+
def test_downloads_sources_csv(self):
655+
r = self.get_view_response('stats.sources_series', group='day',
656+
format='csv')
657+
eq_(r.status_code, 200)
658+
self.csv_eq(r, """date,count,search,api
659+
2009-09-03,10,3,2
660+
2009-08-03,10,3,2
661+
2009-07-03,10,3,2
662+
2009-06-28,10,3,2
663+
2009-06-20,10,3,2
664+
2009-06-12,10,3,2
665+
2009-06-07,10,3,2
666+
2009-06-01,10,3,2""")

apps/stats/unicode_csv.py

Lines changed: 0 additions & 65 deletions
This file was deleted.

apps/stats/utils.py

Lines changed: 0 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -31,60 +31,6 @@ def csv_prep(stats, field_list, precision='1'):
3131
return (stats, fields)
3232

3333

34-
def csv_dynamic_prep(stats, queryset, field_list, total_key, dynamic_key):
35-
"""Prepare dynamic stats for CSV output.
36-
37-
This is suitable for stats containing breakdown values.
38-
All Decimal values will be rounded and converted to integers.
39-
40-
Returns a tuple containing a row generator and a list of field
41-
names suitable for the CSV header.
42-
"""
43-
if not queryset:
44-
return ([], [])
45-
46-
# Summarize entire queryset to get all dynamic field names and
47-
# determine if we need to calculate 'unknown' values.
48-
totals = queryset.summary(**dict(field_list))
49-
50-
# Since there may be averages in play, round all decimals to integers
51-
totals = list(decimal_to_int_gen([totals]))[0]
52-
stats = decimal_to_int_gen(stats)
53-
54-
# Perform 'unknown' calculations if there is a difference between
55-
# dynamic field total and grand total.
56-
dyn_sum = totals[dynamic_key].sum_reduce()
57-
if dyn_sum < totals[total_key]:
58-
totals[dynamic_key]['unknown'] = totals[total_key] - dyn_sum
59-
stats = unknown_gen(stats, total_key, dynamic_key)
60-
61-
# Flatten the nested dynamic dictionary, grab all keys and sort them
62-
# by their values.
63-
d = flatten_dict({dynamic_key: totals[dynamic_key]})
64-
dyn_keys = sorted(d, key=d.__getitem__, reverse=True)
65-
66-
# Build the final list of field keys, replacing dynamic_key with all its
67-
# breakdown fields in dyn_keys.
68-
dyn_index = zip(*field_list)[0].index(dynamic_key)
69-
fields = [k for k, v in field_list[:dyn_index]]
70-
fields.extend(k for k in dyn_keys)
71-
fields.extend(k for k, v in field_list[dyn_index + 1:])
72-
73-
# For CSV headers, we trim the "dynamic_key/" portion from the start of all
74-
# the dyn_keys.
75-
headings = [k for k, v in field_list[:dyn_index]]
76-
headings.extend(k[len(dynamic_key) + 1:] for k in dyn_keys)
77-
headings.extend(k for k, v in field_list[dyn_index + 1:])
78-
79-
# Almost done...
80-
# For each row: flatten the dynamic field dictionary, and turn the row
81-
# into a list of values.
82-
stats = flatten_gen(stats, flatten_key=dynamic_key)
83-
stats = values_gen(stats, fields, zero_val=0)
84-
85-
return (stats, headings)
86-
87-
8834
def flatten_dict(d, key=None):
8935
"""Flatten a nested dictionary.
9036

0 commit comments

Comments (0)