Skip to content

Commit

Permalink
Merge ScoringService into EvaluationService
Browse files Browse the repository at this point in the history
  • Loading branch information
stefano-maggiolo committed Mar 23, 2015
1 parent 1fafeb2 commit 2fb9175
Show file tree
Hide file tree
Showing 17 changed files with 72 additions and 500 deletions.
19 changes: 17 additions & 2 deletions cms/db/submission.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

# Contest Management System - http://cms-dev.github.io/
# Copyright © 2010-2012 Giovanni Mascellani <mascellani@poisson.phc.unipi.it>
# Copyright © 2010-2014 Stefano Maggiolo <s.maggiolo@gmail.com>
# Copyright © 2010-2015 Stefano Maggiolo <s.maggiolo@gmail.com>
# Copyright © 2010-2012 Matteo Boscariol <boscarim@hotmail.com>
# Copyright © 2012-2013 Luca Wehrstedt <luca.wehrstedt@gmail.com>
# Copyright © 2013 Bernard Blackham <bernard@largestprime.net>
Expand Down Expand Up @@ -322,7 +322,7 @@ class SubmissionResult(Base):
nullable=False,
default=0)

# Score as computed by ScoringService. Null means not yet scored.
# Score computed by EvaluationService. Null means not yet scored.
score = Column(
Float,
nullable=True)
Expand Down Expand Up @@ -380,6 +380,21 @@ def get_evaluation(self, testcase):
.filter(Evaluation.testcase == testcase)\
.first()

def compute_score(self, score_type):
    """Score this submission result and store the outcome.

    Delegates the actual scoring to the given score type and writes
    the five resulting values into the corresponding fields of this
    object.

    score_type (ScoreType): the score type of the task this
        submission belongs to.

    """
    outcome = score_type.compute_score(self)
    self.score = outcome[0]
    self.score_details = outcome[1]
    self.public_score = outcome[2]
    self.public_score_details = outcome[3]
    self.ranking_score_details = outcome[4]

def compiled(self):
"""Return whether the submission result has been compiled.
Expand Down
6 changes: 0 additions & 6 deletions cms/server/AdminWebServer.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,8 +474,6 @@ def __init__(self, shard):
self.file_cacher = FileCacher(self)
self.evaluation_service = self.connect_to(
ServiceCoord("EvaluationService", 0))
self.scoring_service = self.connect_to(
ServiceCoord("ScoringService", 0))

ranking_enabled = len(config.rankings) > 0
self.proxy_service = self.connect_to(
Expand Down Expand Up @@ -1111,8 +1109,6 @@ def post(self, dataset_id):
# unloved, but are now part of an autojudged taskset.
self.application.service\
.evaluation_service.search_operations_not_done()
self.application.service\
.scoring_service.search_operations_not_done()

# Now send notifications to contestants.
datetime = make_datetime()
Expand Down Expand Up @@ -1157,8 +1153,6 @@ def get(self, dataset_id):
# unloved, but are now part of an autojudged taskset.
self.application.service\
.evaluation_service.search_operations_not_done()
self.application.service\
.scoring_service.search_operations_not_done()

self.redirect("/task/%s" % dataset.task_id)

Expand Down
2 changes: 0 additions & 2 deletions cms/server/ContestWebServer.py
Original file line number Diff line number Diff line change
Expand Up @@ -452,8 +452,6 @@ def __init__(self, shard, contest):
self.file_cacher = FileCacher(self)
self.evaluation_service = self.connect_to(
ServiceCoord("EvaluationService", 0))
self.scoring_service = self.connect_to(
ServiceCoord("ScoringService", 0))

ranking_enabled = len(config.rankings) > 0
self.proxy_service = self.connect_to(
Expand Down
4 changes: 2 additions & 2 deletions cms/server/templates/admin/reevaluation_buttons.html
Original file line number Diff line number Diff line change
Expand Up @@ -25,13 +25,13 @@
);"
title="Evaluation" >E</button>
<button onclick="cmsrpc_request('{{ url_root }}',
'ScoringService', 0,
'EvaluationService', 0,
'invalidate_submission',
{'{{ reevaluation_par_name }}_id': {{ reevaluation_par_value }},
{% if reevaluation_par_dataset_id is not None %}
'dataset_id': {{ reevaluation_par_dataset_id }},
{% end %}
},
'level': 'score'},
function(response) { utils.redirect_if_ok('{{ url }}', response); }
);"
title="Score" >S</button>
4 changes: 1 addition & 3 deletions cms/server/templates/admin/welcome.html
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,7 @@
}

var strings = []
strings.push('<tr><td>Scored</td><td>' + response['data']['scored'] + '</td></tr>');
if (response['data']['scoring'] != 0)
strings.push('<tr><td>Scoring...</td><td>' + response['data']['scoring'] + '</td></tr>');
strings.push('<tr><td>Evaluated</td><td>' + response['data']['evaluated'] + '</td></tr>');
if (response['data']['compilation_fail'] != 0)
strings.push('<tr><td>Compilation failed</td><td>' + response['data']['compilation_fail'] + '</td></tr>');
if (response['data']['compiling'] != 0)
Expand Down
74 changes: 41 additions & 33 deletions cms/service/EvaluationService.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@
get_datasets_to_judge
from cmscommon.datetime import make_datetime, make_timestamp
from cms.grading.Job import JobGroup
from cms.grading.scoretypes import get_score_type


logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -843,8 +844,8 @@ def __init__(self, shard, contest_id):
self.contest_id = contest_id
self.post_finish_lock = gevent.coros.RLock()

self.scoring_service = self.connect_to(
ServiceCoord("ScoringService", 0))
self.proxy_service = self.connect_to(
ServiceCoord("ProxyService", 0))

self.add_executor(EvaluationExecutor(self))
self.start_sweeper(117.0)
Expand Down Expand Up @@ -981,10 +982,7 @@ def submissions_status(self):
queries['max_evaluations'] = not_evaluated.filter(
SubmissionResult.evaluation_tries
>= EvaluationService.MAX_EVALUATION_TRIES)
queries['scoring'] = evaluated.filter(
not_(SubmissionResult.filter_scored()))
queries['scored'] = evaluated.filter(
SubmissionResult.filter_scored())
queries['evaluated'] = evaluated
queries['total'] = base_query

stats = {}
Expand Down Expand Up @@ -1234,12 +1232,13 @@ def action_finished(self, data, plus, error=None):

# Submission evaluation will be ended only when
# evaluation for each testcase is available.

dataset = Dataset.get_from_id(dataset_id, session)
if len(submission_result.evaluations) == \
len(dataset.testcases):
submission_result.set_evaluation_outcome()
submission_result.evaluation_tries += 1
submission_result.compute_score(
get_score_type(dataset=dataset))

This comment has been minimized.

Copy link
@giomasce

giomasce Mar 29, 2015

Member

It may be sensible to catch and log exceptions here, since we potentially execute code provided by contest admins, which may behave badly. This probably means that we need to retain some status to identify submissions that have undergone the evaluation phase, but with a scoring failure. Unless we want to reject the whole evaluation in case scoring fails.

self.evaluation_ended(submission_result)

elif type_ == ESOperation.USER_TEST_COMPILATION:
Expand Down Expand Up @@ -1281,11 +1280,12 @@ def action_finished(self, data, plus, error=None):
session.commit()

def compilation_ended(self, submission_result):
"""Actions to be performed when we have a submission that has
ended compilation . In particular: we queue evaluation if
compilation was ok, we inform ScoringService if the
compilation failed for an error in the submission, or we
requeue the compilation if there was an error in CMS.
"""Action to be performed when we have compiled a submission.
In particular: we queue evaluation if compilation was ok, we
inform ProxyService if the compilation failed for an error in
the submission, or we requeue the compilation if there was an
error in CMS.
submission_result (SubmissionResult): the submission result.
Expand All @@ -1299,19 +1299,20 @@ def compilation_ended(self, submission_result):
submission_result.dataset_id)

# If instead submission failed compilation, we inform
# ScoringService of the new submission. We need to commit
# before so it has up to date information.
# ProxyService of the new submission. We need to commit before
# so it has up to date information.
elif submission_result.compilation_failed():
logger.info("Submission %d(%d) did not compile.",
submission_result.submission_id,
submission_result.dataset_id)
submission_result.sa_session.commit()
self.scoring_service.new_evaluation(
submission_id=submission_result.submission_id,
dataset_id=submission_result.dataset_id)
# If dataset is the active one, update RWS.
if submission_result.dataset is submission.task.active_dataset:
self.proxy_service.submission_scored(
submission_id=submission.id)

# If compilation failed for our fault, we log the error.
elif submission_result.compilation_outcome is None:
elif not submission_result.compiled():
logger.warning("Worker failed when compiling submission "
"%d(%d).",
submission_result.submission_id,
Expand All @@ -1333,27 +1334,28 @@ def compilation_ended(self, submission_result):
self.submission_enqueue_operations(submission)

def evaluation_ended(self, submission_result):
"""Actions to be performed when we have a submission that has
been evaluated. In particular: we inform ScoringService on
success, we requeue on failure.
"""Action to be performed when we have evaluated a submission.
In particular: we inform ProxyService on success, we requeue
on failure.
submission_result (SubmissionResult): the submission result.
"""
submission = submission_result.submission

# Evaluation successful, we inform ScoringService so it can
# update the score. We need to commit the session beforehand,
# otherwise the ScoringService wouldn't receive the updated
# submission.
# Evaluation successful, we inform ProxyService so it can send
# the score to RWS. We need to commit the session beforehand,
# otherwise it won't receive the updated submission.
if submission_result.evaluated():
logger.info("Submission %d(%d) was evaluated successfully.",
submission_result.submission_id,
submission_result.dataset_id)
submission_result.sa_session.commit()
self.scoring_service.new_evaluation(
submission_id=submission_result.submission_id,
dataset_id=submission_result.dataset_id)
# If dataset is the active one, update RWS.
if submission_result.dataset is submission.task.active_dataset:
self.proxy_service.submission_scored(
submission_id=submission.id)

# Evaluation unsuccessful, we log the error.
else:
Expand Down Expand Up @@ -1520,14 +1522,14 @@ def invalidate_submission(self,
None.
user_id (int|None): id of the user to invalidate, or None.
task_id (int|None): id of the task to invalidate, or None.
level (string): 'compilation' or 'evaluation'
level (string): 'compilation' or 'evaluation' or 'score'
"""
logger.info("Invalidation request received.")

# Validate arguments
# TODO Check that all these objects belong to this contest.
if level not in ("compilation", "evaluation"):
if level not in ("compilation", "evaluation", "score"):
raise ValueError(
"Unexpected invalidation level `%s'." % level)

Expand Down Expand Up @@ -1572,13 +1574,19 @@ def invalidate_submission(self,
submission_result.invalidate_compilation()
elif level == "evaluation":
submission_result.invalidate_evaluation()
elif level == "score":
submission_result.compute_score(
get_score_type(dataset=submission_result.dataset))

This comment has been minimized.

Copy link
@giomasce

giomasce Mar 29, 2015

Member

Here we may have to catch exceptions as well.


# Finally, we re-enqueue the operations for the
# submissions.
for submission in submissions:
self.submission_enqueue_operations(submission)
# submissions, if the level wasn't score (in that case we
# recompute it immediately).
if level in ("compilation", "evaluation"):
for submission in submissions:
self.submission_enqueue_operations(submission)

session.commit()
logger.info("Invalidation completed.")

@rpc_method
def disable_worker(self, shard):
Expand Down
9 changes: 5 additions & 4 deletions cms/service/ProxyService.py
Original file line number Diff line number Diff line change
Expand Up @@ -435,9 +435,10 @@ def reinitialize(self):
def submission_scored(self, submission_id):
"""Notice that a submission has been scored.
Usually called by ScoringService when it's done with scoring a
submission result. Since we don't trust anyone we verify that,
and then send data about the score to the rankings.
Usually called by EvaluationService when it's done with
scoring a submission result. Since we don't trust anyone we
verify that, and then send data about the score to the
rankings.
submission_id (int): the id of the submission that changed.
dataset_id (int): the id of the dataset to use.
Expand Down Expand Up @@ -497,7 +498,7 @@ def dataset_updated(self, task_id):
update all the scores for the task using the submission results
on the new active dataset. If some of them are not available
yet we keep the old scores (we don't delete them!) and wait for
ScoringService to notify us that the new ones are available.
EvaluationService to notify us that the new ones are available.
task_id (int): the ID of the task whose dataset has changed.
Expand Down
Loading

0 comments on commit 2fb9175

Please sign in to comment.