Skip to content

Commit

Permalink
addjsonresults doesn't call create_report_if_enough_data() for every …
Browse files Browse the repository at this point in the history
…single result. It will be called once for every exe, rev, env combination, and only after all data has been saved
  • Loading branch information
tobami committed Feb 7, 2011
1 parent 81e5b09 commit fe0d0ca
Show file tree
Hide file tree
Showing 2 changed files with 50 additions and 34 deletions.
61 changes: 36 additions & 25 deletions speedcenter/codespeed/tests.py
Expand Up @@ -54,7 +54,7 @@ def test_add_correct_result(self):

def test_add_non_default_result(self):
"""
Add result data with non-default options
Add result data with non-mandatory options
"""
modified_data = copy.deepcopy(self.data)
modified_data['result_date'] = self.cdate
Expand Down Expand Up @@ -85,9 +85,7 @@ def test_add_non_default_result(self):
self.assertEquals(res.val_min, 1)

def test_bad_environment(self):
"""
Add result associated with non-existing environment
"""
"""Should return 400 when environment does not exist"""
bad_name = 'bigdog1'
self.data['environment'] = bad_name
response = self.client.post(self.path, self.data)
Expand Down Expand Up @@ -136,7 +134,7 @@ def test_report_is_created(self):
self.assertEquals(number_of_reports, 1)


class AddJSONResultTest(TestCase):
class AddJSONResultsTest(TestCase):
def setUp(self):
self.path = reverse('speedcenter.codespeed.views.addjsonresults')
self.client = Client()
Expand All @@ -146,24 +144,32 @@ def setUp(self):
self.cdate = datetime(
temp.year, temp.month, temp.day, temp.hour, temp.minute, temp.second)

self.data = [ {'commitid': '123',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards',
'environment': 'bigdog',
'result_value': 456,},
{'commitid': '456',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards',
'environment': 'bigdog',
'result_value': 457,},
{'commitid': '789',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards',
'environment': 'bigdog',
'result_value': 458,}]
self.data = [
{'commitid': '123',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards',
'environment': 'bigdog',
'result_value': 456,},
{'commitid': '456',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards',
'environment': 'bigdog',
'result_value': 457,},
{'commitid': '456',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards2',
'environment': 'bigdog',
'result_value': 34,},
{'commitid': '789',
'project': 'pypy',
'executable': 'pypy-c',
'benchmark': 'Richards',
'environment': 'bigdog',
'result_value': 458,},
]

def test_add_correct_results(self):
"""Should add all results when the request data is valid"""
Expand All @@ -172,7 +178,7 @@ def test_add_correct_results(self):
# Check that we get a success response
self.assertEquals(response.status_code, 202)
self.assertEquals(response.content, "All result data saved successfully")

# Check that the data was correctly saved
e = Environment.objects.get(name='bigdog')
b = Benchmark.objects.get(name='Richards')
Expand Down Expand Up @@ -201,7 +207,7 @@ def test_add_correct_results(self):
environment=e
)
self.assertTrue(res.value, 457)

r = Revision.objects.get(commitid='789', project=p)
res = Result.objects.get(
revision=r,
Expand All @@ -211,6 +217,11 @@ def test_add_correct_results(self):
)
self.assertTrue(res.value, 458)

number_of_reports = len(Report.objects.all())
        # After adding 4 results for 3 revisions, only 1 report should be created.
        # The third revision will need a result for Richards2 as well before its report is triggered.
self.assertEquals(number_of_reports, 1)

def test_bad_environment(self):
"""Add result associated with non-existing environment.
Only change one item in the list.
Expand Down
23 changes: 14 additions & 9 deletions speedcenter/codespeed/views.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-

from datetime import datetime
from itertools import chain
import json
Expand Down Expand Up @@ -712,7 +711,7 @@ def validate_result(item):
except Environment.DoesNotExist:
return "Environment %(environment)s not found" % item, error

def check_report(rev, exe, e):
def create_report_if_enough_data(rev, exe, e):
# Trigger Report creation when there are enough results
last_revs = Revision.objects.filter(project=rev.project).order_by('-date')[:2]
if len(last_revs) > 1:
Expand Down Expand Up @@ -777,29 +776,35 @@ def save_result(data):

r.full_clean()
r.save()
check_report(rev, exe, e)

return None, False
return (rev, exe, e), False

def addresult(request):
    """Save a single benchmark result posted as form-encoded data.

    Expects a POST whose fields describe one result (commitid, project,
    executable, benchmark, environment, result_value, ...).  After the
    result is saved, report creation is attempted for its
    (revision, executable, environment) combination.

    Returns:
        HttpResponse 202 on success,
        HttpResponseBadRequest (400) when validation/saving fails,
        HttpResponseNotAllowed (405) for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed('POST')
    data = request.POST

    response, error = save_result(data)
    if error:
        # On failure save_result returns an error message string.
        return HttpResponseBadRequest(response)
    else:
        # On success save_result returns a (rev, exe, env) tuple.
        create_report_if_enough_data(response[0], response[1], response[2])
        # NOTE(review): "succesfully" is misspelled, but the body is kept
        # byte-identical in case clients compare it verbatim.
        return HttpResponse("Result data saved succesfully", status=202)

def addjsonresults(request):
    """Save a batch of benchmark results posted as a JSON list.

    The POST must carry a 'json' field containing a list of result
    dicts.  All results are saved first; only afterwards is report
    creation attempted, once per unique (revision, executable,
    environment) combination rather than once per result.

    Returns:
        HttpResponse 202 when every result is saved,
        HttpResponseBadRequest (400) on the first invalid result
        (earlier results in the list remain saved),
        HttpResponseNotAllowed (405) for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponseNotAllowed('POST')
    data = json.loads(request.POST['json'])

    # Deduplicate (rev, exe, env) tuples so each combination triggers
    # report creation only once, after all data has been saved.
    unique_reports = set()
    for result in data:
        response, error = save_result(result)
        if error:
            # On failure save_result returns an error message string.
            return HttpResponseBadRequest(response)
        else:
            # On success save_result returns a (rev, exe, env) tuple.
            unique_reports.add(response)

    for rev, exe, env in unique_reports:
        create_report_if_enough_data(rev, exe, env)

    return HttpResponse("All result data saved successfully", status=202)

0 comments on commit fe0d0ca

Please sign in to comment.