Skip to content

Commit

Permalink
Merge 1099ee5 into 7bcb5b7
Browse files Browse the repository at this point in the history
  • Loading branch information
jolyonb committed Jul 11, 2018
2 parents 7bcb5b7 + 1099ee5 commit 3b61c65
Show file tree
Hide file tree
Showing 5 changed files with 140 additions and 24 deletions.
19 changes: 14 additions & 5 deletions mitxgraders/baseclasses.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from voluptuous import Schema, Required, All, Any, Range, MultipleInvalid
from voluptuous.humanize import validate_with_humanized_errors as voluptuous_validate
from mitxgraders.version import __version__
from mitxgraders.helpers.validatorfuncs import all_unique

class ConfigError(Exception):
    """Exception signalling that a grader's configuration is invalid."""
Expand Down Expand Up @@ -92,13 +93,15 @@ def schema_config(self):
})

@abc.abstractmethod
def check(self, answers, student_input):
def check(self, answers, student_input, **kwargs):
"""
Check student_input for correctness and provide feedback.
Arguments:
answers: The expected result(s) and grading information
student_input: The student's input passed by edX
**kwargs: Anything else that has been passed in to the grader
(used for passing dependent_inputs)
"""

def __call__(self, expect, student_input):
Expand Down Expand Up @@ -227,7 +230,9 @@ def schema_config(self):
schema = super(ItemGrader, self).schema_config
return schema.extend({
Required('answers', default=tuple()): self.schema_answers,
Required('wrong_msg', default=""): str
Required('wrong_msg', default=""): str,
# dependent_input is used by ListGrader for correlated grading
Required('dependent_input', default=[]): All([int], all_unique)
})

def schema_answers(self, answer_tuple):
Expand Down Expand Up @@ -298,14 +303,16 @@ def validate_expect(expect):
"""
return Schema(str)(expect)

def check(self, answers, student_input):
def check(self, answers, student_input, **kwargs):
"""
Compares student input to each answer in answers, using check_response.
Computes the best outcome for the student.
Arguments:
answer: A tuple of answers to compare to, or None to use internal config
student_input (str): The student's input passed by edX
**kwargs: Anything else that has been passed in to the grader
(used for passing dependent_inputs)
"""
# If no answers provided, use the internal configuration
answers = self.config['answers'] if answers is None else answers
Expand All @@ -324,7 +331,7 @@ def check(self, answers, student_input):
raise ConfigError(msg.format(type(student_input)))

# Compute the results for each answer
results = [self.check_response(answer, student_input) for answer in answers]
results = [self.check_response(answer, student_input, **kwargs) for answer in answers]

# Now find the best result for the student
best_score = max([r['grade_decimal'] for r in results])
Expand All @@ -338,7 +345,7 @@ def check(self, answers, student_input):
return best_result_with_longest_msg

@abc.abstractmethod
def check_response(self, answer, student_input):
def check_response(self, answer, student_input, **kwargs):
"""
Compares student_input against a single answer.
Differs from check, which must compare against all possible answers.
Expand All @@ -347,4 +354,6 @@ def check_response(self, answer, student_input):
Arguments:
answer (schema_answer): The answer to compare to
student_input (str): The student's input passed by edX
**kwargs: Anything else that has been passed in (used by ListGrader to
facilitate dependent_input)
"""
49 changes: 40 additions & 9 deletions mitxgraders/formulagrader.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,10 +215,10 @@ def numbered_vars_regexp(numbered_vars):
True
"""
head_list = '|'.join(map(re.escape, numbered_vars))
regexp = (r"^((" + head_list + ")" # Start and match any head (capture full string, head)
r"_{" # match _{
r"(?:[-]?[1-9]\d*|0)" # match number pattern
r"})$") # match closing }, close group, and end of string
regexp = (r"^((" + head_list + ")" # Start and match any head (capture full string, head)
r"_{" # match _{
r"(?:[-]?[1-9]\d*|0)" # match number pattern
r"})$") # match closing }, close group, and end of string
return re.compile(regexp)

def validate_no_collisions(config, keys):
Expand Down Expand Up @@ -478,7 +478,7 @@ def validate_expect(cls, expect):
"Variables:\n"
"{variables}\n"
"Student Eval: {student_eval}\n"
"Compare to: {compare_parms_eval}\n" # compare_parms_eval is list, so start 1 char earlier
"Compare to: {compare_parms_eval}\n" # compare_parms_eval is list, so start 1 char earlier
"Comparer Function: {comparer}\n"
"Comparison Satisfied: {comparer_result}\n"
""
Expand Down Expand Up @@ -527,12 +527,12 @@ def __init__(self, config=None, **kwargs):
self.config['sample_from'] = schema_sample_from(self.config['sample_from'])
# Note that voluptuous ensures that there are no orphaned entries in sample_from

def check_response(self, answer, student_input):
def check_response(self, answer, student_input, **kwargs):
"""Check the student response against a given answer"""

# Now perform the computations
try:
result, used_funcs = self.raw_check(answer, student_input)
result, used_funcs = self.raw_check(answer, student_input, **kwargs)
if result['ok'] is True or result['ok'] == 'partial':
self.post_eval_validation(student_input, used_funcs)
return result
Expand Down Expand Up @@ -586,8 +586,20 @@ def generate_variable_list(self, answer, student_input):

return variable_list, sample_from_dict

def raw_check(self, answer, student_input):
def raw_check(self, answer, student_input, **kwargs):
"""Perform the numerical check of student_input vs answer"""
# Check that all dependencies are present
if self.config["dependent_input"]:
if "dependencies" not in kwargs: # pragma: no cover
raise ConfigError("Expected dependencies in kwargs, not found")
dependencies = kwargs["dependencies"]
for i in self.config["dependent_input"]:
if i not in dependencies: # pragma: no cover
raise ConfigError("Expected dependency {} to be present, "
"but not found".format(i))
else:
dependencies = {}

# Generate samples
variable_list, sample_from_dict = self.generate_variable_list(answer,
student_input)
Expand All @@ -613,6 +625,18 @@ def raw_check(self, answer, student_input):
funclist.update(func_samples[i])
varlist.update(var_samples[i])

# Compute any dependencies
dependencies_eval = {
"input_{}".format(idx): evaluator(formula=expr,
variables=varlist,
functions=funclist,
suffixes=self.suffixes)[0]
for idx, expr in dependencies.items()
}
# This makes a dictionary of new variables "input_n" that can be used
# in computing expressions. Go and put them in the variables.
varlist.update(dependencies_eval)

# Compute expressions
comparer_params_eval = [
evaluator(formula=param,
Expand All @@ -623,6 +647,11 @@ def raw_check(self, answer, student_input):
for param in answer['expect']['comparer_params']
]

# Before performing student evaluation, scrub the dependencies
# so that students can't use them
for idx in self.config['dependent_input']:
del varlist["input_{}".format(idx)]

student_eval, used_funcs = evaluator(student_input,
variables=varlist,
functions=funclist,
Expand All @@ -632,6 +661,8 @@ def raw_check(self, answer, student_input):
# Check if expressions agree
comparer_result = comparer(comparer_params_eval, student_eval, self.comparer_utils)
if self.config['debug']:
# Put the dependencies back in for the debug output
varlist.update(dependencies_eval)
self.log_sample_info(i, varlist, funclist, student_eval,
comparer, comparer_params_eval, comparer_result)

Expand Down Expand Up @@ -662,7 +693,7 @@ def log_sample_info(self, index, varlist, funclist, student_eval,
)
self.log(re.sub(r"0x[0-9a-fA-F]+", "0x...", header))
self.log(self.debug_appendix_sample_template.format(
sample_num=index + 1, # to account for 0 index
sample_num=index + 1, # to account for 0 index
samples_total=self.config['samples'],
variables=pp.pformat(varlist),
student_eval=student_eval,
Expand Down
90 changes: 83 additions & 7 deletions mitxgraders/listgrader.py
Original file line number Diff line number Diff line change
Expand Up @@ -345,7 +345,7 @@ def validate_grouping(self):
"instead of ListGrader"
raise ConfigError(msg.format(group_idx, num_items, type(subgrader).__name__))

def check(self, answers, student_input):
def check(self, answers, student_input, **kwargs):
"""Checks student_input against answers, which may be provided"""
# If no answers provided, use the internal configuration
answers = self.config['answers'] if answers is None else answers
Expand Down Expand Up @@ -459,18 +459,43 @@ def perform_check(self, answers, student_list):
# If ordered, pass answers and inputs to the appropriate grader.
compare = zip(answers, grouped_inputs)
if self.subgrader_list:
input_list = [
self.config['subgraders'][index].check(*pair)
for index, pair in enumerate(compare)
]
# This is the only situation in which you can have correlated graders
# where one grader depends on another input, because:
# * Must have ordered graders, so inputs can be numbered and routed
# appropriately
# * Must have individual graders, because if one grader expects extra
# inputs, all of them must, and then there are circular references
# Also, this must be the lowest level of ListGrader, so there can be no
# grouping present.
# TODO: Validate that correlated graders are correctly set up on load

# Detect if correlated graders are being employed
correlated = False
if not self.grouping:
for subgrader in self.config['subgraders']:
if len(subgrader.config["dependent_input"]) > 0:
correlated = True
break

# Perform the grading
if correlated:
input_list = ListGrader.correlated_check(self.config['subgraders'],
compare)
else:
input_list = [
self.config['subgraders'][index].check(*pair)
for index, pair in enumerate(compare)
]
else:
input_list = [
self.config['subgraders'].check(*pair)
for pair in compare
]
else:
# If unordered, then there is a single subgrader. Find optimal grading.
input_list = find_optimal_order(self.config['subgraders'].check, answers, grouped_inputs)
input_list = find_optimal_order(self.config['subgraders'].check,
answers,
grouped_inputs)

# We need to restore the original order of inputs.
# At this point, input_list contains items each of which is either:
Expand Down Expand Up @@ -541,6 +566,57 @@ def get_best_result(results):
# Just return the first result in our remaining list.
return culled_results[np.where(in_the_running)[0][0]]

@staticmethod
def correlated_check(subgraders, compare_list):
    """
    Grade a list that utilizes correlated graders using dependent_input.

    Arguments:
        subgraders (list): One grader per input; each grader's
            config["dependent_input"] is a list of 1-based input numbers
            that grader depends on (empty list = no dependencies).
        compare_list (list): (answer, student_input) pairs, one per grader,
            in the same order as subgraders.

    Returns a list of check results from each input, or raises a
    ConfigError if circular references are detected.
    """
    # Store the index of graders we have not yet evaluated
    unevaluated = set(range(len(subgraders)))
    # One result slot per input, filled in as each grader is evaluated
    results = [None for subgrader in subgraders]

    # Check all inputs, following chains as necessary; each pass evaluates
    # every grader whose dependencies have already been satisfied
    while unevaluated:
        remove = []
        # Try each unevaluated grader
        for idx in unevaluated:
            grader = subgraders[idx]
            # Check to see if dependencies have been evaluated yet
            dependencies = {}
            for num in grader.config["dependent_input"]:
                # Note that our lists are 0-based, while we expect
                # 1-based information from dependent_input
                if num-1 in unevaluated:
                    # A dependency is still pending; abandon this grader
                    # for now (break skips the for-else block below)
                    break
                else:
                    # Pull out that input (the student's raw answer string)
                    dependencies[num] = compare_list[num-1][1]
            else:
                # for-else: only reached when no dependency was pending
                if dependencies:
                    # All dependencies have been evaluated
                    # Pass them through a kwarg
                    results[idx] = grader.check(*compare_list[idx],
                                                dependencies=dependencies)
                else:
                    # No dependencies present
                    results[idx] = grader.check(*compare_list[idx])
                remove.append(idx)

        if remove:
            # Take out the indices we just evaluated
            # (deferred so the set is not mutated during iteration)
            for idx in remove:
                unevaluated.remove(idx)
        else:
            # No progress made on this pass: the remaining graders form
            # one or more dependency cycles, so report them (1-based)
            bad_indices = sorted([x + 1 for x in unevaluated])
            bad_symbols = ", ".join(map(str, bad_indices))
            raise ConfigError("Circularly dependent dependent_inputs detected: " +
                              bad_symbols)

    return results


class SingleListGrader(ItemGrader):
"""
Expand Down Expand Up @@ -657,7 +733,7 @@ def schema_answers(self, answer_tuple):

return answers_tuple

def check_response(self, answer, student_input):
def check_response(self, answer, student_input, **kwargs):
"""Check student_input against a given answer list"""
answers = answer # Rename from the ItemGrader name
student_list = student_input.split(self.config['delimiter'])
Expand Down
4 changes: 2 additions & 2 deletions mitxgraders/plugins/integralgrader.py
Original file line number Diff line number Diff line change
Expand Up @@ -336,7 +336,7 @@ def structure_and_validate_input(self, student_input):

return structured_input

def check(self, answers, student_input):
def check(self, answers, student_input, **kwargs):
"""Validates and cleans student_input, then checks response and handles errors"""
answers = self.config['answers'] if answers is None else answers
structured_input = self.structure_and_validate_input(student_input)
Expand Down Expand Up @@ -518,7 +518,7 @@ def raw_integrand(x):
errmsg = "Integrand has evaluated to complex number but must evaluate to a real."
integrand = check_output_is_real(raw_integrand, IntegrationError, errmsg)
result_re = integrate.quad(integrand, lower, upper, **self.config['integrator_options'])
result_im = (None, None, {'neval':None})
result_im = (None, None, {'neval': None})

# Restore the integration variable's initial value now that we are done integrating
if int_var_initial is not None:
Expand Down
2 changes: 1 addition & 1 deletion mitxgraders/stringgrader.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def schema_config(self):
Required('case_sensitive', default=True): bool
})

def check_response(self, answer, student_input):
def check_response(self, answer, student_input, **kwargs):
"""
Grades a student response against a given answer
Expand Down

0 comments on commit 3b61c65

Please sign in to comment.