Skip to content

Commit

Permalink
Drop 'checksum' machinery
Browse files Browse the repository at this point in the history
It looks like the 'checksum' machinery is dead, so let's remove it.

Closes #316
  • Loading branch information
ylobankov committed Feb 15, 2022
1 parent 2604c46 commit c8db4f4
Show file tree
Hide file tree
Showing 5 changed files with 18 additions and 39 deletions.
8 changes: 2 additions & 6 deletions lib/test.py
Expand Up @@ -7,7 +7,6 @@
import sys
import traceback
from functools import partial
from hashlib import md5

from lib import Options
from lib.colorer import color_stdout
Expand Down Expand Up @@ -167,7 +166,7 @@ def run(self, server):
it to stdout.
Returns short status of the test as a string: 'skip', 'pass',
'new', 'updated' or 'fail' and results file checksum on fail.
'new', 'updated' or 'fail'.
There is also one possible value for short_status, 'disabled',
but it is returned in the caller, TestSuite.run_test().
"""
Expand Down Expand Up @@ -237,7 +236,6 @@ def run(self, server):
self.is_valgrind_clean = not bool(non_empty_logs)

short_status = None
result_checksum = None

if self.skip:
short_status = 'skip'
Expand Down Expand Up @@ -272,8 +270,6 @@ def run(self, server):
if has_result:
safe_makedirs(self.var_suite_path)
shutil.copy(self.tmp_result, self.reject)
with open(self.tmp_result, mode='rb') as result_file:
result_checksum = md5(result_file.read()).hexdigest()
short_status = 'fail'
color_stdout("[ fail ]\n", schema='test_fail')

Expand All @@ -299,7 +295,7 @@ def run(self, server):
"Test failed! Output from log file "
"{0}:\n".format(log_file))
where = ": there were warnings in the valgrind log file(s)"
return short_status, result_checksum
return short_status

def print_diagnostics(self, log_file, message):
"""Print whole lines of client program output leading to test
Expand Down
13 changes: 3 additions & 10 deletions lib/test_suite.py
Expand Up @@ -205,12 +205,6 @@ def fragile_tests(self):
res.append(test)
return res

def get_test_fragile_checksums(self, test):
try:
return self.fragile['tests'][test]['checksums']
except Exception:
return []

def gen_server(self):
try:
return Server(self.ini, test_suite=self)
Expand Down Expand Up @@ -263,7 +257,7 @@ def stop_server(self, server, inspector, silent=False, cleanup=True):

def run_test(self, test, server, inspector):
""" Returns short status of the test as a string: 'skip', 'pass',
'new', 'fail', or 'disabled' and results file checksum on fail.
'new', 'fail', or 'disabled'.
"""
test.inspector = inspector
test_name = os.path.basename(test.name)
Expand All @@ -278,18 +272,17 @@ def run_test(self, test, server, inspector):

start_time = time.time()
if self.is_test_enabled(test, conf, server):
short_status, result_checksum = test.run(server)
short_status = test.run(server)
else:
color_stdout("[ disabled ]\n", schema='t_name')
short_status = 'disabled'
result_checksum = None
duration = time.time() - start_time

# cleanup only if test passed or if --force mode enabled
if Options().args.is_force or short_status == 'pass':
inspector.cleanup_nondefault()

return short_status, result_checksum, duration
return short_status, duration

def is_parallel(self):
return self.ini['is_parallel']
Expand Down
28 changes: 10 additions & 18 deletions lib/worker.py
Expand Up @@ -143,17 +143,15 @@ class WorkerTaskResult(BaseWorkerMessage):
""" Passed into the result queue when a task is processed (done) by the
worker. The short_status (string) field is intended to give a short note on whether
the task was processed successfully or not, but with a little more flexibility
than binary True/False. The result_checksum (string) field saves the results
file checksum on test fail. The task_id (any hashable object) field hold ID of
than binary True/False. The task_id (any hashable object) field holds the ID of
the processed task. The is_long (boolean) field shows if task is in long test
list in suite.ini. The duration (float) field saves the task run time. The
show_reproduce_content field carries the show_reproduce_content configuration from suite.ini.
"""
def __init__(self, worker_id, worker_name, task_id,
short_status, result_checksum, is_long, duration, show_reproduce_content):
short_status, is_long, duration, show_reproduce_content):
super(WorkerTaskResult, self).__init__(worker_id, worker_name)
self.short_status = short_status
self.result_checksum = result_checksum
self.task_id = task_id
self.is_long = is_long
self.duration = duration
Expand Down Expand Up @@ -222,9 +220,8 @@ def current_task(self, task_id):
return WorkerCurrentTask(self.id, self.name, task_name, task_param,
task_result, task_tmp_result)

def wrap_result(self, task_id, short_status, result_checksum, duration):
def wrap_result(self, task_id, short_status, duration):
return WorkerTaskResult(self.id, self.name, task_id, short_status,
result_checksum,
self.suite.test_is_long(task_id), duration,
self.suite.show_reproduce_content())

Expand Down Expand Up @@ -317,7 +314,7 @@ def run_task(self, task_id):
with open(self.reproduce_file, 'a') as f:
task_id_str = yaml.safe_dump(task.id, default_flow_style=True)
f.write('- ' + task_id_str)
short_status, result_checksum, duration = self.suite.run_test(
short_status, duration = self.suite.run_test(
task, self.server, self.inspector)
except KeyboardInterrupt:
self.report_keyboard_interrupt()
Expand All @@ -327,7 +324,7 @@ def run_task(self, task_id):
'\nWorker "%s" received the following error; stopping...\n'
% self.name + traceback.format_exc() + '\n', schema='error')
raise
return short_status, result_checksum, duration
return short_status, duration

def run_loop(self, task_queue, result_queue):
""" called from 'run_all' """
Expand All @@ -342,11 +339,9 @@ def run_loop(self, task_queue, result_queue):
break

short_status = None
result_checksum = None
duration = 0.0
result_queue.put(self.current_task(task_id))
testname = os.path.basename(task_id[0])
fragile_checksums = self.suite.get_test_fragile_checksums(testname)
retries_left = self.suite.fragile_retries()
# let's run till short_status became 'pass'
while short_status != 'pass' and retries_left >= 0:
Expand All @@ -355,18 +350,15 @@ def run_loop(self, task_queue, result_queue):
if short_status == 'fail':
color_stdout(
'Test "%s", conf: "%s"\n'
'\tfrom "fragile" list failed with results'
' file checksum: "%s", rerunning ...\n'
% (task_id[0], task_id[1], result_checksum), schema='error')
'\tfrom "fragile" list failed, rerunning ...\n'
% (task_id[0], task_id[1]), schema='error')
# run task and save the result to short_status
short_status, result_checksum, duration = self.run_task(task_id)
# check if the results file checksum set on fail and if
# the newly created results file is known by checksum
if not result_checksum or (result_checksum not in fragile_checksums):
short_status, duration = self.run_task(task_id)
if testname not in self.suite.fragile['tests']:
break
retries_left = retries_left - 1

result_queue.put(self.wrap_result(task_id, short_status, result_checksum, duration))
result_queue.put(self.wrap_result(task_id, short_status, duration))
if short_status == 'fail':
if Options().args.is_force:
self.restart_server()
Expand Down
4 changes: 1 addition & 3 deletions listeners.py
Expand Up @@ -55,7 +55,6 @@ def process_result(self, obj):
if obj.short_status == 'fail':
self.failed_tasks.append((obj.task_id,
obj.worker_name,
obj.result_checksum,
obj.show_reproduce_content))

self.duration_stats[obj.task_id] = obj.duration
Expand Down Expand Up @@ -163,11 +162,10 @@ def print_statistics(self):
return False

color_stdout('Failed tasks:\n', schema='test_var')
for task_id, worker_name, result_checksum, show_reproduce_content in self.failed_tasks:
for task_id, worker_name, show_reproduce_content in self.failed_tasks:
logfile = self.get_logfile(worker_name)
task_id_str = yaml.safe_dump(task_id, default_flow_style=True)
color_stdout('- %s' % task_id_str, schema='test_var')
color_stdout('# results file checksum: %s\n' % result_checksum)
color_stdout('# logfile: %s\n' % logfile)
reproduce_file_path = get_reproduce_file(worker_name)
color_stdout('# reproduce file: %s\n' % reproduce_file_path)
Expand Down
4 changes: 2 additions & 2 deletions test-run.py
Expand Up @@ -175,8 +175,8 @@ def main_loop_consistent(failed_test_ids):
worker_id = 1
worker = task_group['gen_worker'](worker_id)
for task_id in task_ids:
# The 'run_task' method returns a tuple of three items:
# (short_status, result_checksum, duration). So taking the first
# The 'run_task' method returns a tuple of two items:
# (short_status, duration). So taking the first
# item of this tuple for failure check.
short_status = worker.run_task(task_id)[0]
if short_status == 'fail':
Expand Down

0 comments on commit c8db4f4

Please sign in to comment.