Merge pull request #2029 from johnbumgardner/mapping-level

Grade refactoring

nnhimes committed Aug 31, 2021
2 parents fcc760a + a3f1a4d commit c7573ce
Showing 17 changed files with 207 additions and 209 deletions.
4 changes: 2 additions & 2 deletions app/controllers/bookmarks_controller.rb
@@ -86,7 +86,7 @@ def specific_average_score(bookmark)
reviewed_object_id: assessment.id,
reviewee_id: bookmark.id,
reviewer_id: AssignmentParticipant.find_by(user_id: current_user.id).id).flat_map {|r| Response.where(map_id: r.id) }
- score = Answer.assessment_score(response: responses, questions: questions)
+ score = Response.assessment_score(response: responses, questions: questions)
if score.nil?
return '-'
else
@@ -105,7 +105,7 @@ def total_average_score(bookmark)
responses = BookmarkRatingResponseMap.where(
reviewed_object_id: assessment.id,
reviewee_id: bookmark.id).flat_map {|r| Response.where(map_id: r.id) }
- totalScore = Answer.compute_scores(responses, questions)
+ totalScore = Response.compute_scores(responses, questions)
if totalScore[:avg].nil?
return '-'
else
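Both bookmark-scoring helpers above now call the relocated class methods on Response. A minimal sketch of the two call shapes and their return values, inferred from the diff (`responses` and `questions` stand in for the controller's locals; this is not part of the commit):

    # Response.assessment_score takes a hash with a list of responses and the
    # questionnaire's questions; it returns a percentage (0..100), or -1.0
    # when there is no score.
    score = Response.assessment_score(response: responses, questions: questions)

    # Response.compute_scores aggregates many responses into a hash with
    # :max, :min, and :avg keys; all three are nil when the list is empty.
    totals = Response.compute_scores(responses, questions)
    totals[:avg] # => average percentage, or nil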
2 changes: 1 addition & 1 deletion app/helpers/grades_helper.rb
@@ -13,7 +13,7 @@ def get_accordion_title(last_topic, new_topic)
def score_vector(reviews, symbol)
scores = []
reviews.each do |review|
- scores << Answer.assessment_score(response: [review], questions: @questions[symbol.to_sym], q_types: [])
+ scores << Response.assessment_score(response: [review], questions: @questions[symbol.to_sym], q_types: [])
end
scores
end
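As context for the change above, score_vector maps each review to one Response.assessment_score call; a short usage sketch with hypothetical objects, matching what the spec later in this diff expects:

    # Sketch only. Assuming @questions = { s: [question] } and two reviews
    # that each score 75, score_vector returns one entry per review.
    @questions = { s: [question] }
    score_vector([review_a, review_b], 's') # => [75, 75]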
86 changes: 0 additions & 86 deletions app/models/answer.rb
@@ -5,89 +5,6 @@ class Answer < ActiveRecord::Base
belongs_to :question
belongs_to :response

# Computes the total score for a *list of assessments*
# parameters
# assessments - a list of assessments of some type (e.g., author feedback, teammate review)
# questions - the list of questions that was filled out in the process of doing those assessments
def self.compute_scores(assessments, questions)
scores = {}
if assessments.present?
scores[:max] = -999_999_999
scores[:min] = 999_999_999
total_score = 0
length_of_assessments = assessments.length.to_f
assessments.each do |assessment|
curr_score = assessment_score(response: [assessment], questions: questions)

scores[:max] = curr_score if curr_score > scores[:max]
scores[:min] = curr_score unless curr_score >= scores[:min] || curr_score == -1

# Check whether the review is invalid; if it is not valid, do not include it in the score calculation
if @invalid == 1 or curr_score == -1
length_of_assessments -= 1
curr_score = 0
end
total_score += curr_score
end
scores[:avg] = unless length_of_assessments.zero?
total_score.to_f / length_of_assessments
else
0
end
else
scores[:max] = nil
scores[:min] = nil
scores[:avg] = nil
end

scores
end

# Computes the total score for an assessment
# params
# assessment - specifies the assessment for which the total score is being calculated
# questions - specifies the list of questions being evaluated in the assessment

def self.assessment_score(params)
@response = params[:response].last
if @response
@questions = params[:questions]

weighted_score = 0
sum_of_weights = 0
max_question_score = 0

@questionnaire = Questionnaire.find(@questions.first.questionnaire_id)

questionnaireData = ScoreView.find_by_sql ["SELECT q1_max_question_score ,SUM(question_weight) as sum_of_weights,SUM(question_weight * s_score) as weighted_score FROM score_views WHERE type in('Criterion', 'Scale') AND q1_id = ? AND s_response_id = ?", @questions[0].questionnaire_id, @response.id]
# zhewei: we should check whether weighted_score is nil,
# which would mean the student did not assign any score before saving the peer review.
# If we do not check here, to_f will convert nil to 0, and we can no longer tell whether a 0
# means the reviewer assigned 0 to every question or saved without filling anything in.
weighted_score = (questionnaireData[0].weighted_score.to_f unless questionnaireData[0].weighted_score.nil?)
sum_of_weights = questionnaireData[0].sum_of_weights.to_f
# Zhewei: we should add a question's weight only if its answer is not nil in the DB.
all_answers_for_curr_response = Answer.where(response_id: @response.id)
all_answers_for_curr_response.each do |answer|
question = Question.find(answer.question_id)
# If a question is a scored question (criterion or scale), its weight cannot be null.
# A nil Answer.answer indicates that this scored question was not filled in, so the question
# is ignored and not counted towards the score for this response.
if answer.answer.nil? && question.is_a?(ScoredQuestion)
question_weight = Question.find(answer.question_id).weight
sum_of_weights -= question_weight
end
end
max_question_score = questionnaireData[0].q1_max_question_score.to_f
unless sum_of_weights <= 0 || !max_question_score || weighted_score.nil?
return (weighted_score / (sum_of_weights * max_question_score)) * 100
else
return -1.0 # indicating no score
end
end
end

# start added by ferry, required for the summarization (refactored by Yang on June 22, 2016)
def self.answers_by_question_for_reviewee_in_round(assignment_id, reviewee_id, q_id, round)
# get all answers to this question
question_answer = Answer.select(:answer, :comments)
@@ -120,13 +37,10 @@ def self.answers_by_question_for_reviewee(assignment_id, reviewee_id, q_id)
answers.question_id = ? ", assignment_id, reviewee_id, q_id)
question_answers
end
- # end added by ferry, required for the summarization

- # start added by ferry for answer tagging
def get_reviewee_from_answer(answer)
resp = Response.find(answer.response_id)
map = ResponseMap.find(resp.map_id)
map.reviewee_id
end
- # end added by ferry for answer tagging
end
4 changes: 2 additions & 2 deletions app/models/assignment.rb
@@ -166,7 +166,7 @@ def scores(questions)
scores[:teams][index.to_s.to_sym][:scores] = merge_grades_by_rounds(grades_by_rounds, total_num_of_assessments, total_score)
else
assessments = ReviewResponseMap.get_assessments_for(team)
- scores[:teams][index.to_s.to_sym][:scores] = Answer.compute_scores(assessments, questions[:review])
+ scores[:teams][index.to_s.to_sym][:scores] = Response.compute_scores(assessments, questions[:review])
end
index += 1
end
@@ -614,7 +614,7 @@ def compute_grades_by_rounds(questions, team)
(1..self.num_review_rounds).each do |i|
assessments = ReviewResponseMap.get_responses_for_team_round(team, i)
round_sym = ("review" + i.to_s).to_sym
- grades_by_rounds[round_sym] = Answer.compute_scores(assessments, questions[round_sym])
+ grades_by_rounds[round_sym] = Response.compute_scores(assessments, questions[round_sym])
total_num_of_assessments += assessments.size
total_score += grades_by_rounds[round_sym][:avg] * assessments.size.to_f unless grades_by_rounds[round_sym][:avg].nil?
end
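compute_grades_by_rounds (partially shown above) accumulates a per-round average weighted by the number of assessments in each round; a worked example of that arithmetic with made-up numbers:

    # Hypothetical: round 1 has 4 assessments averaging 80.0,
    # round 2 has 2 assessments averaging 95.0.
    total_score = (80.0 * 4) + (95.0 * 2)  # => 510.0
    total_num_of_assessments = 4 + 2       # => 6
    total_score / total_num_of_assessments # => 85.0, the merged average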
4 changes: 2 additions & 2 deletions app/models/assignment_participant.rb
@@ -84,8 +84,8 @@ def compute_assignment_score(questions, scores)
else
questionnaire.get_assessments_round_for(self, round)
end
- # Anser.compute_scores computes the total score for a *list of assessments*
- scores[questionnaire_symbol][:scores] = Answer.compute_scores(scores[questionnaire_symbol][:assessments], questions[questionnaire_symbol])
+ # Response.compute_scores computes the total score for a list of responses to a questionnaire
+ scores[questionnaire_symbol][:scores] = Response.compute_scores(scores[questionnaire_symbol][:assessments], questions[questionnaire_symbol])
end
end

2 changes: 1 addition & 1 deletion app/models/assignment_team.rb
@@ -166,7 +166,7 @@ def scores(questions)
assignment.questionnaires.each do |questionnaire|
scores[questionnaire.symbol] = {}
scores[questionnaire.symbol][:assessments] = ReviewResponseMap.where(reviewee_id: self.id)
- scores[questionnaire.symbol][:scores] = Answer.compute_scores(scores[questionnaire.symbol][:assessments], questions[questionnaire.symbol])
+ scores[questionnaire.symbol][:scores] = Response.compute_scores(scores[questionnaire.symbol][:assessments], questions[questionnaire.symbol])
end
scores[:total_score] = assignment.compute_total_score(scores)
scores
6 changes: 3 additions & 3 deletions app/models/on_the_fly_calc.rb
@@ -32,15 +32,15 @@ def compute_avg_and_ranges_hash
assessments = assessments.select {|assessment| assessment.round == round }
scores[contributor.id] = {} if round == 1
scores[contributor.id][round] = {}
- scores[contributor.id][round] = Answer.compute_scores(assessments, questions)
+ scores[contributor.id][round] = Response.compute_scores(assessments, questions)
end
end
else
contributors.each do |contributor|
questions = peer_review_questions_for_team(contributor)
assessments = ReviewResponseMap.get_assessments_for(contributor)
scores[contributor.id] = {}
- scores[contributor.id] = Answer.compute_scores(assessments, questions)
+ scores[contributor.id] = Response.compute_scores(assessments, questions)
end
end
scores
@@ -58,7 +58,7 @@ def peer_review_questions_for_team(team, round_number = nil)

def calc_review_score
unless @corresponding_response.empty?
- @this_review_score_raw = Answer.assessment_score(response: @corresponding_response, questions: @questions)
+ @this_review_score_raw = Response.assessment_score(response: @corresponding_response, questions: @questions)
if @this_review_score_raw
@this_review_score = ((@this_review_score_raw * 100) / 100.0).round if @this_review_score_raw >= 0.0
end
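In calc_review_score above, the `((raw * 100) / 100.0).round` expression simply rounds the raw percentage to the nearest integer (the multiply and divide cancel out, aside from forcing a float); a one-line illustration with an assumed raw score:

    ((87.5 * 100) / 100.0).round # => 88; negative "no score" values skip the guard and stay unrounded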
77 changes: 76 additions & 1 deletion app/models/response.rb
@@ -108,7 +108,7 @@ def populate_new_response(response_map, current_round)

most_recent_submission_by_reviewee = reviewee_team.most_recent_submission if reviewee_team

- if response.nil? || (most_recent_submission_by_reviewee and most_recent_submission_by_reviewee.updated_at > response.updated_at)
+ if response.nil? || (most_recent_submission_by_reviewee && most_recent_submission_by_reviewee.updated_at > response.updated_at)
response = Response.create(map_id: response_map.id, additional_comment: '', round: current_round, is_submitted: 0)
end
response
@@ -319,4 +319,79 @@ def add_table_rows questionnaire_max, questions, answers, code, tag_prompt_deplo
end
code
end

# Computes the total score for a *list of assessments*
# parameters
# assessments - a list of assessments of some type (e.g., author feedback, teammate review)
# questions - the list of questions that was filled out in the process of doing those assessments
def self.compute_scores(assessments, questions)
scores = {}
if assessments.present?
scores[:max] = -999_999_999
scores[:min] = 999_999_999
total_score = 0
length_of_assessments = assessments.length.to_f
assessments.each do |assessment|
curr_score = assessment_score(response: [assessment], questions: questions)

scores[:max] = curr_score if curr_score > scores[:max]
scores[:min] = curr_score unless curr_score >= scores[:min] || curr_score == -1

# Check whether the review is invalid; if it is not valid, do not include it in the score calculation
if @invalid == 1 || curr_score == -1
length_of_assessments -= 1
curr_score = 0
end
total_score += curr_score
end
scores[:avg] = unless length_of_assessments.zero?
total_score.to_f / length_of_assessments
else
0
end
else
scores[:max] = nil
scores[:min] = nil
scores[:avg] = nil
end

scores
end

# Computes the total score for an assessment
# params
# assessment - specifies the assessment for which the total score is being calculated
# questions - specifies the list of questions being evaluated in the assessment

def self.assessment_score(params)
@response = params[:response].last
return -1.0 if @response.nil?
if @response
@questions = params[:questions]
return -1.0 if @questions.nil?
weighted_score = 0
sum_of_weights = 0
max_question_score = 0

@questionnaire = Questionnaire.find(@questions.first.questionnaire_id)

# Retrieve data for questionnaire (max score, sum of scores, weighted scores, etc.)
questionnaire_data = ScoreView.questionnaire_data(@questions[0].questionnaire_id, @response.id)
weighted_score = questionnaire_data.weighted_score.to_f unless questionnaire_data.weighted_score.nil?
sum_of_weights = questionnaire_data.sum_of_weights.to_f
answers = Answer.where(response_id: @response.id)
answers.each do |answer|
question = Question.find(answer.question_id)
if answer.answer.nil? && question.is_a?(ScoredQuestion)
sum_of_weights -= Question.find(answer.question_id).weight
end
end
max_question_score = questionnaire_data.q1_max_question_score.to_f
if sum_of_weights > 0 && max_question_score && weighted_score > 0
return (weighted_score / (sum_of_weights * max_question_score)) * 100
else
return -1.0 # indicating no score
end
end
end
end
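The core formula in the relocated assessment_score normalizes the weighted sum of answer scores by the maximum attainable weighted score; a worked example under assumed weights and scores:

    # Hypothetical questionnaire: two scored questions on a 0..5 scale
    # (q1_max_question_score = 5) with weights 1 and 2; the reviewer
    # answers 4 and 5 respectively.
    weighted_score     = (1 * 4) + (2 * 5) # SUM(question_weight * s_score) => 14.0
    sum_of_weights     = 1 + 2             # SUM(question_weight)           => 3.0
    max_question_score = 5.0
    (weighted_score / (sum_of_weights * max_question_score)) * 100 # => 93.33...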
5 changes: 5 additions & 0 deletions app/models/score_view.rb
@@ -4,4 +4,9 @@ class ScoreView < ActiveRecord::Base
def readonly?
false
end

def self.questionnaire_data(questionnaire_id, response_id)
questionnaire_data = ScoreView.find_by_sql ["SELECT q1_max_question_score ,SUM(question_weight) as sum_of_weights,SUM(question_weight * s_score) as weighted_score FROM score_views WHERE type in('Criterion', 'Scale') AND q1_id = ? AND s_response_id = ?", questionnaire_id, response_id]
questionnaire_data[0]
end
end
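A sketch of how the extracted helper is consumed (the IDs are placeholders); it returns a single ScoreView row whose columns feed the formula in Response.assessment_score:

    # Sketch only; 42 and 1001 are hypothetical questionnaire and response IDs.
    row = ScoreView.questionnaire_data(42, 1001)
    row.q1_max_question_score # max score attainable on a question
    row.sum_of_weights        # SUM(question_weight) over Criterion/Scale questions
    row.weighted_score        # SUM(question_weight * s_score); nil if nothing was scored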
2 changes: 1 addition & 1 deletion app/views/grades/_review_table.html.erb
@@ -28,7 +28,7 @@

<% for review in reviews %>
<td align="center">
- <% score = Answer.assessment_score(:response => [review], :questions => @questions[symbol], :q_types => []) %>
+ <% score = Response.assessment_score(:response => [review], :questions => @questions[symbol], :q_types => []) %>
<% if controller.action_name != "view_my_scores" %>
<input type="hidden" class="form-control" id="mailer_<%= review.map.reviewer.fullname(session[:ip]) %>_grade" name="mailer[<%= review.map.reviewer.fullname(session[:ip]) %>_grade]" value="<%= score %>">
<% end %>
4 changes: 2 additions & 2 deletions spec/helpers/grades_helper_spec.rb
@@ -39,7 +39,7 @@

describe 'score_vector' do
it 'should return the scores from the questions in a vector' do
- allow(Answer).to receive(:assessment_score).with(response: [review_response], questions: [question], q_types: []).and_return(75)
+ allow(Response).to receive(:assessment_score).with(response: [review_response], questions: [question], q_types: []).and_return(75)
@questions = {:s => [question]}
expect(score_vector([review_response, review_response], 's')).to eq([75,75])
end
Expand All @@ -50,7 +50,7 @@
symbol = :s
@grades_bar_charts = {:s => nil}
@participant_score = {symbol => {:assessments => [review_response, review_response]}}
- allow(Answer).to receive(:assessment_score).with(response: [review_response], questions: [question], q_types: []).and_return(75)
+ allow(Response).to receive(:assessment_score).with(response: [review_response], questions: [question], q_types: []).and_return(75)
allow(GradesController).to receive(:bar_chart).with([75,75]).and_return(
'http://chart.apis.google.com/chart?chs=800x200&cht=bvg&chco=0000ff,ff0000,00ff00&chd=s:yoeKey,KUeoy9,9yooy9&chdl=Trend+1|Trend+2|Trend+3&chtt=Bar+Chart'
)
