This repository has been archived by the owner on Oct 21, 2018. It is now read-only.

Commit

Added gross OMR logic for filter-by-cohort reports. Shoot me.
bufke committed Oct 25, 2013
1 parent 92523f0 commit 14d387b
Showing 3 changed files with 37 additions and 13 deletions.
18 changes: 13 additions & 5 deletions ecwsp/omr/models.py
@@ -61,12 +61,20 @@ def enroll_students(self, students):
         for student in students:
             self.__enroll_student(student)

-    @property
-    def get_average(self):
+    def get_average(self, cohorts=None):
         """ Calculate the average. Pretty fast so no caching is needed """
-        total_test_earned = self.testinstance_set.aggregate(total_earned=Sum('answerinstance__points_earned'))['total_earned']
-        total_tests_taken = self.testinstance_set.annotate(earned=Sum('answerinstance__points_earned')).filter(earned__gt=0).count()
-        points_possible = self.testinstance_set.all()[0].points_possible
+        test_instances = self.testinstance_set.all()
+        if not cohorts:
+            from ecwsp.sis.models import Cohort
+            cohorts = Cohort.objects.all()
+
+        # http://stackoverflow.com/questions/4093910/django-aggregates-sums-in-postgresql-dont-use-distinct-is-this-a-bug/4917507#4917507
+        subquery = self.testinstance_set.filter(student__cohort__in=cohorts)
+        test_instances = test_instances.filter(pk__in=subquery)
+
+        total_test_earned = test_instances.aggregate(total_earned=Sum('answerinstance__points_earned'))['total_earned']
+        total_tests_taken = test_instances.annotate(earned=Sum('answerinstance__points_earned')).filter(earned__gt=0).distinct().count()
+        points_possible = test_instances.all()[0].points_possible
         return float(total_test_earned) / (total_tests_taken * points_possible)

     def get_percent_scoring_over(self, min_score=70):
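Two things worth noting about the new signature: dropping `@property` means callers that previously read `test.get_average` as an attribute now have to call `test.get_average()`, and the local `from ecwsp.sis.models import Cohort` import presumably avoids a circular import between the omr and sis apps. A hypothetical usage sketch, assuming the method lives on the omr `Test` model and with illustrative lookups:

```python
# Hypothetical usage; assumes get_average() is defined on ecwsp.omr.models.Test.
from ecwsp.omr.models import Test
from ecwsp.sis.models import Cohort

test = Test.objects.get(pk=1)                         # illustrative lookup
seniors = Cohort.objects.filter(name='Seniors')       # illustrative cohort filter

overall = test.get_average()                          # no argument: averages over all cohorts
senior_only = test.get_average(cohorts=seniors)       # restricted to the selected cohorts
```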
32 changes: 24 additions & 8 deletions ecwsp/omr/reports.py
@@ -21,6 +21,7 @@
 from ecwsp.sis.report import *
 from ecwsp.sis.helper_functions import Struct
 from ecwsp.sis.template_report import TemplateReport
+from ecwsp.sis.models import Cohort
 from ecwsp.administration.models import Template
 from ecwsp.omr.models import AnswerInstance
 from ecwsp.benchmarks.models import Benchmark
@@ -138,24 +139,39 @@ def download_results(self, test):

         return report.as_download()

-    def download_teacher_results(self, test, format, template):
+    def download_teacher_results(self, test, format, template, cohorts=None):
         """ Make appy based report showing results for a whole class """
+        if not cohorts:
+            cohorts = Cohort.objects.all()
+
+        # Hack: filter through a pk subquery so the cohort join does not duplicate rows in the aggregates
+        subquery = test.testinstance_set.filter(student__cohort__in=cohorts)
+
         report = TemplateReport()
         report.file_format = format
-        test_instances = test.testinstance_set.annotate(Sum('answerinstance__points_earned'))
+        test_instances = test.testinstance_set.filter(pk__in=subquery).annotate(Sum('answerinstance__points_earned'))
         test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()

         points_possible = test.points_possible
         points_to_earn = 0.70 * test.points_possible
-        number_above_70 = test_instances.filter(answerinstance__points_earned__sum__gte=points_to_earn).count()
-        total_test_takers = test.testinstance_set.filter(answerinstance__points_earned__gt=0).distinct().count()
+        number_above_70 = test_instances.filter(pk__in=subquery).filter(answerinstance__points_earned__sum__gte=points_to_earn).count()
+        total_test_takers = test_instances.filter(pk__in=subquery).filter(answerinstance__points_earned__gt=0).distinct().count()
         test.percent_over_70 = float(number_above_70) / total_test_takers
+        test.report_average = test.get_average(cohorts=cohorts)
+
         for benchmark in test.benchmarks:
-            question_benchmarks = test.question_set.filter(benchmarks=benchmark)
+            qb_subquery = test.question_set.filter(answerinstance__test_instance__student__cohort__in=cohorts)
+            question_benchmarks = test.question_set.filter(pk__in=qb_subquery).filter(benchmarks=benchmark).distinct()
             benchmark.points_possible = question_benchmarks.aggregate(Sum('point_value'))['point_value__sum']
             benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
-            benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
+            # Really think this should work...but nope.
+            #benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
+            earned_sum = 0
+            for question_benchmark in question_benchmarks:
+                for answer in question_benchmark.answerinstance_set.filter(test_instance__student__cohort__in=cohorts).distinct():
+                    earned_sum += answer.points_earned
+            benchmark.total_points_earned = earned_sum

             benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible

             # Percent students over 70%
@@ -181,8 +197,8 @@ def download_teacher_results(self, test, format, template):
             for benchmark in question.benchmarks.all():
                 question.benchmark_text += '{}, '.format(benchmark.number)
             question.benchmark_text = question.benchmark_text[:-2]
-            question.num_correct = question.answerinstance_set.filter(points_earned__gte=F('points_possible')).count()
-            question.num_total = question.answerinstance_set.count()
+            question.num_correct = question.answerinstance_set.filter(test_instance__student__cohort__in=cohorts).filter(points_earned__gte=F('points_possible')).count()
+            question.num_total = question.answerinstance_set.filter(test_instance__student__cohort__in=cohorts).count()
             question.percent_correct = float(question.num_correct) / question.num_total

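The commented-out aggregate ("Really think this should work...but nope.") appears to hit the same problem the StackOverflow link in models.py describes: summing `answerinstance__points_earned` across the joined, `distinct()` queryset still lets duplicated join rows leak into `Sum()`, and that aggregate also does not restrict the answers to the selected cohorts, hence the explicit Python loop. A rough ORM alternative, sketched with the same pk-subquery trick; the field paths follow the filters in the diff but are otherwise assumed, not tested against this schema:

```python
# Sketch only: reuses the commit's pk-subquery workaround at the AnswerInstance
# level. Field paths follow the filters in the diff; the schema is assumed.
from django.db.models import Sum
from ecwsp.omr.models import AnswerInstance

def benchmark_points_earned(question_benchmarks, cohorts):
    """Total points earned on the given questions by students in `cohorts`."""
    answer_subquery = AnswerInstance.objects.filter(
        question__in=question_benchmarks,                 # questions carrying this benchmark
        test_instance__student__cohort__in=cohorts,       # only the selected cohorts
    )
    # Aggregate over a plain pk filter so join duplicates cannot inflate Sum().
    total = AnswerInstance.objects.filter(
        pk__in=answer_subquery,
    ).aggregate(total=Sum('points_earned'))['total']
    return total or 0
```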
Binary file modified media/templates/omr_teacher_results.odt
