Skip to content

Commit

Permalink
Let it use total score if there are too many tests
Browse files Browse the repository at this point in the history
  • Loading branch information
aben20807 committed Sep 19, 2021
1 parent e4f21bf commit 5bb1216
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 15 deletions.
1 change: 1 addition & 0 deletions examples/student/correct/judge.conf
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,5 @@ DeleteTempOutput = true

ExitOrLog = exit
ScoreDict = {"0":"0","1":"30","2":"60","3":"90","4":"100"}
TotalScore = 100
Timeout = 10
1 change: 1 addition & 0 deletions examples/student/wrong/judge.conf
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,5 @@ DeleteTempOutput = true

ExitOrLog = exit
ScoreDict = {"0":"0","1":"30","2":"60","3":"90","4":"100"}
TotalScore = 100
Timeout = 10
1 change: 1 addition & 0 deletions examples/ta/ta_judge.conf
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ AnswerDir = ./judge_resources/answer
AnswerExtension = .out
ExitOrLog = exit
ScoreDict = {"0":"0","1":"30","2":"60","3":"80","4":"90","5":"100"}
TotalScore = 100
Timeout = 10

[TaConfig]
Expand Down
27 changes: 19 additions & 8 deletions judge/judge.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
SOFTWARE.
"""

__version__ = "2.2.2"
__version__ = "2.3.0"

import sys

Expand Down Expand Up @@ -143,7 +143,6 @@ def __init__(self, config, error_handler):
self._ans_dir = self._config["AnswerDir"]
self._ans_ext = self._config["AnswerExtension"]
self.timeout = self._config["Timeout"]
self.score_dict = json.loads(self._config["ScoreDict"])
self.error_handler = error_handler
# tests contains corresponding input and answer path
self.tests = self.inputs_to_tests(self._config["Inputs"])
Expand Down Expand Up @@ -329,8 +328,9 @@ def compare(


class Report:
def __init__(self, report_verbose=0, score_dict=None):
def __init__(self, report_verbose=0, score_dict=None, total_score=0):
self.score_dict = score_dict
self.total_score = total_score
self.report_verbose = report_verbose
self.table = []

Expand Down Expand Up @@ -371,15 +371,22 @@ def print_report(self):
print(dash)
print(row["diff"])
print(doubledash)

            # Tests whose names end with "hide" are not counted when calculating the score.
correct_cnt = [
row["accept"] for row in self.table if not row["test"].endswith("hide")
].count(True)
obtained_score = self.get_score_by_correct_cnt(correct_cnt)
valid_test_number = len(
[test for test in tests if not test.endswith("hide")]
) # not to count hide test case
total_score = int(self.score_dict[str(valid_test_number)])
total_score = 0
obtained_score = 0
try: # try to use score_dict first
total_score = int(self.score_dict[str(valid_test_number)])
obtained_score = self.get_score_by_correct_cnt(correct_cnt)
except KeyError: # if the number of tests out of range, use total_score
total_score = self.total_score
obtained_score = int(correct_cnt / len(tests) * total_score)
print(
f"Correct/Total problems:\t{correct_cnt}/{valid_test_number}\n"
f"Obtained/Total scores:\t{obtained_score}/{total_score}"
Expand Down Expand Up @@ -422,15 +429,17 @@ def get_args():
return parser.parse_args()


def judge_all_tests(judge: LocalJudge, verbose_level, score_dict):
def judge_all_tests(judge: LocalJudge, verbose_level, score_dict, total_score):
"""Judge all tests for given program.
If `--input` is set, there is only one input in this judgement.
"""

judge.build()

report = Report(report_verbose=verbose_level, score_dict=score_dict)
report = Report(
report_verbose=verbose_level, score_dict=score_dict, total_score=total_score
)
for test in judge.tests:
returncode, output_filepath = judge.run(test.input_filepath)
accept, diff = judge.compare(output_filepath, test.answer_filepath, returncode)
Expand Down Expand Up @@ -493,5 +502,7 @@ def copy_output_to_dir(judge: LocalJudge, output_dir, delete_temp_output, ans_ex
exit(returncode)

score_dict = json.loads(config["Config"]["ScoreDict"])
returncode = judge_all_tests(judge, args.verbose, score_dict)
    # total_score is used when the number of tests is outside the range of score_dict
total_score = json.loads(config["Config"]["TotalScore"])
returncode = judge_all_tests(judge, args.verbose, score_dict, total_score)
exit(returncode)
11 changes: 4 additions & 7 deletions judge/ta_judge.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,6 @@
+ "Please use Python 3"
)

from judge import ErrorHandler
from judge import LocalJudge
import argparse
import configparser
import os
Expand All @@ -51,7 +49,6 @@
import multiprocessing
import signal
import time
from judge import Report

Student = namedtuple("Student", ("id", "zip_type", "zip_path", "extract_path"))

Expand Down Expand Up @@ -155,7 +152,7 @@ def append_log_msg(ori_result, log_msg):


def judge_one_student(
student, all_student_results, tj: TaJudge, lj: LocalJudge, skip_report=False
student, all_student_results, tj: TaJudge, lj: judge.LocalJudge, skip_report=False
):
"""Judge one student and return the correctness result."""
lj.error_handler.init_student(student.id)
Expand Down Expand Up @@ -297,9 +294,9 @@ def setup():
"format": "%(asctime)-15s [%(levelname)s] %(message)s",
}

eh = ErrorHandler(ta_config["Config"]["ExitOrLog"], **logging_config)
eh = judge.ErrorHandler(ta_config["Config"]["ExitOrLog"], **logging_config)
tj = TaJudge(ta_config["TaConfig"])
lj = LocalJudge(ta_config["Config"], eh)
lj = judge.LocalJudge(ta_config["Config"], eh)

if not args.student is None:
# Assign specific student for this judgement and report to screen
Expand All @@ -317,7 +314,7 @@ def setup():
res_dict = judge_one_student(student, None, tj, lj, False)
report_table = res_dict["report_table"]

report = Report(report_verbose=args.verbose, score_dict=lj.score_dict)
report = judge.Report(report_verbose=args.verbose, score_dict=lj.score_dict)
report.table = report_table
report.print_report()

Expand Down

0 comments on commit 5bb1216

Please sign in to comment.