test infra: catch bad decorators and import-time errors
Summary:
This change enhances the LLDB test infrastructure to convert
load-time exceptions in a given Python test module into errors.
Before this change, specifying a non-existent test decorator,
or otherwise introducing a load-time error in a Python test
module, would not get flagged as an error.

With this change, typos and other load-time errors in a Python
test file are converted to errors and reported by the
test runner.
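
For illustration, the conversion follows this pattern during test
discovery (a minimal sketch; import_test_module and
report_error_event are placeholder names, not the exact dotest.py
code):

import traceback

def import_test_module(base, test_filename, report_error_event):
    # Import a test module by base name. A load-time failure such
    # as a misspelled decorator raises here; convert it into an
    # error event instead of silently dropping the file.
    try:
        return __import__(base)
    except Exception as ex:
        report_error_event(test_filename, ex, traceback.format_exc())
        raise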

This change also includes test-infrastructure tests covering
the new work. I'm going to wait until these infrastructure
tests are runnable on the main platforms before I work them
into the normal testing workflows.

The test infrastructure tests can be run using the standard Python unittest discovery mechanism:

cd packages/Python/lldbsuite/test_event
python -m unittest discover -s test/src -p 'Test*.py'

Those tests run the dotest inferior with a known-broken test and verify that the errors are caught. These tests did not pass until I modified dotest.py to capture such errors properly.

@zturner, if you have the chance, could you try the steps above (the python -m unittest ... line) on Windows? That would help us shake out any Python 2/3/Windows issues. I don't think there's anything fancy going on, but I didn't want to hook this into the normal test flow until I know it works there.

I'll be slowly adding more tests to cover some of the other breakage I've occasionally seen that didn't get collected as part of the test summarization. This is the biggest gap I'm aware of.

Reviewers: zturner, labath

Subscribers: zturner, lldb-commits

Differential Revision: http://reviews.llvm.org/D20193

llvm-svn: 269489
tfiala committed May 13, 2016
1 parent ead771c commit 7c5f7ca
Showing 11 changed files with 307 additions and 67 deletions.
133 changes: 79 additions & 54 deletions lldb/packages/Python/lldbsuite/test/dotest.py
@@ -676,73 +676,98 @@ def setupSysPath():
# This is to locate the lldb.py module. Insert it right after sys.path[0].
sys.path[1:1] = [lldbPythonDir]


def visit_file(dir, name):
"""Adds the tests from one Python test file to the suite, honoring
any regexp and filterspec filters."""
# Try to match the regexp pattern, if specified.
if configuration.regexp:
import re
if not re.search(configuration.regexp, name):
# We didn't match the regex, we're done.
return

# We found a match for our test. Add it to the suite.

# Update the sys.path first.
if not sys.path.count(dir):
sys.path.insert(0, dir)
base = os.path.splitext(name)[0]

# Thoroughly check the filterspec against the base module and admit
# the (base, filterspec) combination only when it makes sense.
filterspec = None
for filterspec in configuration.filters:
# Optimistically set the flag to True.
filtered = True
module = __import__(base)
parts = filterspec.split('.')
obj = module
for part in parts:
try:
parent, obj = obj, getattr(obj, part)
except AttributeError:
# The filterspec has failed.
filtered = False
break

# If filtered, we have a good filterspec. Add it.
if filtered:
# print("adding filter spec %s to module %s" % (filterspec, module))
configuration.suite.addTests(
unittest2.defaultTestLoader.loadTestsFromName(filterspec, module))
continue

# Forgo this module if the (base, filterspec) combo is invalid
if configuration.filters and not filtered:
return

if not filterspec or not filtered:
# Add the entire file's worth of tests since we're not filtered.
# Also the fail-over case when the filterspec branch
# (base, filterspec) combo doesn't make sense.
configuration.suite.addTests(unittest2.defaultTestLoader.loadTestsFromName(base))


def visit(prefix, dir, names):
"""Visitor function for os.path.walk(path, visit, arg)."""

dir_components = set(dir.split(os.sep))
excluded_components = set(['.svn', '.git'])
if dir_components.intersection(excluded_components):
#print("Detected an excluded dir component: %s" % dir)
return

# Gather all the Python test file names that follow the Test*.py pattern.
python_test_files = [
name
for name in names
if name.endswith('.py') and name.startswith(prefix)]

# Visit all the python test files.
for name in python_test_files:
try:
# Ensure we error out if we have multiple tests with the same
# base name.
# Future improvement: find all the places where we work with base
# names and convert to full paths. We have directory structure
# to disambiguate these, so we shouldn't need this constraint.
if name in configuration.all_tests:
raise Exception("Found multiple tests with the name %s" % name)
configuration.all_tests.add(name)

# Run the relevant tests in the python file.
visit_file(dir, name)
except Exception as ex:
# Convert this exception to a test event error for the file.
test_filename = os.path.abspath(os.path.join(dir, name))
if configuration.results_formatter_object is not None:
# Grab the backtrace for the exception.
import traceback
backtrace = traceback.format_exc()

# Generate the test event.
configuration.results_formatter_object.handle_event(
EventBuilder.event_for_job_test_add_error(
test_filename, ex, backtrace))
raise


def disabledynamics():
@@ -0,0 +1,13 @@
from __future__ import print_function
from lldbsuite.test import lldbtest
from lldbsuite.test import decorators


class NonExistentDecoratorTestCase(lldbtest.TestBase):

mydir = lldbtest.TestBase.compute_mydir(__file__)

@decorators.nonExistentDecorator(bugnumber="yt/1300")
def test(self):
"""Verify non-existent decorators are picked up by test runner."""
pass
13 changes: 13 additions & 0 deletions lldb/packages/Python/lldbsuite/test_event/event_builder.py
@@ -320,6 +320,19 @@ def event_for_cleanup_error(test, error_tuple):
event["issue_phase"] = "cleanup"
return event

@staticmethod
def event_for_job_test_add_error(test_filename, exception, backtrace):
"""Builds an error-status job result event for a test file that
failed to load (e.g. hit an exception at import time)."""
event = EventBuilder.bare_event(EventBuilder.TYPE_JOB_RESULT)
event["status"] = EventBuilder.STATUS_ERROR
if test_filename is not None:
event["test_filename"] = EventBuilder._assert_is_python_sourcefile(test_filename)
if exception is not None and "__class__" in dir(exception):
event["issue_class"] = exception.__class__
event["issue_message"] = exception
if backtrace is not None:
event["issue_backtrace"] = backtrace
return event

@staticmethod
def event_for_job_exceptional_exit(
pid, worker_index, exception_code, exception_description,
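
For reference, the event built by event_for_job_test_add_error()
carries roughly this payload (values illustrative; the base fields
come from bare_event()):

# Illustrative shape of the job-result error event.
sample_event = {
    "event": "job_result",          # from TYPE_JOB_RESULT
    "status": "error",              # from STATUS_ERROR
    "test_filename": "/path/to/TestInvalidDecorator.py",
    "issue_class": AttributeError,  # exception.__class__
    "issue_message": AttributeError("..."),  # the exception itself
    "issue_backtrace": "Traceback (most recent call last): ...",
}
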
@@ -84,6 +84,7 @@ def socket_closer(open_sock):
results_file_object = None
cleanup_func = None

file_is_stream = False
if config.filename:
# Open the results file for writing.
if config.filename == 'stdout':
@@ -102,6 +103,7 @@ def socket_closer(open_sock):
results_file_object, cleanup_func = create_socket(config.port)
default_formatter_name = (
"lldbsuite.test_event.formatter.pickled.RawPickledFormatter")
file_is_stream = True

# If we have a results formatter name specified and we didn't specify
# a results file, we should use stdout.
@@ -137,7 +139,10 @@ def socket_closer(open_sock):
command_line_options)

# Create the TestResultsFormatter given the processed options.
results_formatter_object = cls(
results_file_object,
formatter_options,
file_is_stream)

def shutdown_formatter():
"""Shuts down the formatter when it is no longer needed."""
4 changes: 2 additions & 2 deletions lldb/packages/Python/lldbsuite/test_event/formatter/curses.py
@@ -27,9 +27,9 @@
class Curses(results_formatter.ResultsFormatter):
"""Receives live results from tests that are running and reports them to the terminal in a curses GUI"""

def __init__(self, out_file, options, file_is_stream):
# Initialize the parent
super(Curses, self).__init__(out_file, options, file_is_stream)
self.using_terminal = True
self.have_curses = True
self.initialize_event = None
29 changes: 22 additions & 7 deletions lldb/packages/Python/lldbsuite/test_event/formatter/pickled.py
@@ -30,9 +30,27 @@ def arg_parser(cls):
parser = super(RawPickledFormatter, cls).arg_parser()
return parser

class StreamSerializer(object):
@staticmethod
def serialize(test_event, out_file):
# Send it as {serialized_length_of_serialized_bytes}{serialized_bytes}
import struct
msg = cPickle.dumps(test_event)
packet = struct.pack("!I%ds" % len(msg), len(msg), msg)
out_file.send(packet)

class BlockSerializer(object):
@staticmethod
def serialize(test_event, out_file):
cPickle.dump(test_event, out_file)

def __init__(self, out_file, options, file_is_stream):
super(RawPickledFormatter, self).__init__(out_file, options, file_is_stream)
self.pid = os.getpid()
if file_is_stream:
self.serializer = self.StreamSerializer()
else:
self.serializer = self.BlockSerializer()

def handle_event(self, test_event):
super(RawPickledFormatter, self).handle_event(test_event)
@@ -50,8 +68,5 @@ def handle_event(self, test_event):
# Tack on the pid.
test_event["pid"] = self.pid

# Serialize the test event.
self.serializer.serialize(test_event, self.out_file)
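
The receiving end of the stream serializer is not shown in this
commit's excerpts; decoding the {length}{bytes} packets produced
above would look roughly like this (a sketch, assuming a blocking
socket and Python 2's cPickle):

import struct
import cPickle  # use pickle on Python 3

def read_test_event(sock):
    # Read the 4-byte network-order length prefix, then the pickled
    # payload. A robust reader would loop on recv() until the full
    # byte count has arrived.
    header = sock.recv(4)
    (length,) = struct.unpack("!I", header)
    payload = sock.recv(length)
    return cPickle.loads(payload)
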
@@ -111,7 +111,7 @@ def arg_parser(cls):
'the summary output.'))
return parser

def __init__(self, out_file, options, file_is_stream):
super(ResultsFormatter, self).__init__()
self.out_file = out_file
self.options = options
Expand All @@ -120,6 +120,7 @@ def __init__(self, out_file, options):
raise Exception("ResultsFormatter created with no file object")
self.start_time_by_test = {}
self.terminate_called = False
self.file_is_stream = file_is_stream

# Store counts of test_result events by status.
self.result_status_counts = {
4 changes: 2 additions & 2 deletions lldb/packages/Python/lldbsuite/test_event/formatter/xunit.py
@@ -153,14 +153,14 @@ def _build_regex_list_from_patterns(patterns):
regex_list.append(re.compile(pattern))
return regex_list

def __init__(self, out_file, options, file_is_stream):
"""Initializes the XunitFormatter instance.
@param out_file file-like object where formatted output is written.
@param options specifies a dictionary of options for the
formatter.
@param file_is_stream True when out_file is a stream-style
channel (e.g. a socket) rather than a regular file.
"""
# Initialize the parent
super(XunitFormatter, self).__init__(out_file, options, file_is_stream)
self.text_encoding = "UTF-8"
self.invalid_xml_re = XunitFormatter._build_illegal_xml_regex()
self.total_test_count = 0
@@ -0,0 +1,13 @@
from __future__ import print_function
from lldbsuite.test import lldbtest
from lldbsuite.test import decorators


class NonExistentDecoratorTestCase(lldbtest.TestBase):

mydir = lldbtest.TestBase.compute_mydir(__file__)

@decorators.nonExistentDecorator(bugnumber="yt/1300")
def test(self):
"""Verify non-existent decorators are picked up by test runner."""
pass
@@ -0,0 +1,70 @@
#!/usr/bin/env python
"""
Tests that the event system reports issues during decorator
handling as errors.
"""
# System-provided imports
import os
import unittest

# Local-provided imports
import event_collector


class TestCatchInvalidDecorator(unittest.TestCase):

TEST_DIR = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"resources",
"invalid_decorator")

def test_with_whole_file(self):
"""
Test that a non-existent decorator generates a test-event error
when running all tests in the file.
"""
# Determine the test case file we're using.
test_file = os.path.join(self.TEST_DIR, "TestInvalidDecorator.py")

# Collect all test events generated for this file.
error_results = _filter_error_results(
event_collector.collect_events_whole_file(test_file))

self.assertGreater(
len(error_results),
0,
"At least one job or test error result should have been returned")

def test_with_function_filter(self):
"""
Test that a non-existent decorator generates a test-event error
when running a filtered test.
"""
# Collect all test events generated during running of tests
# in a given directory using a test name filter. Internally,
# this runs through a different code path that needs to be
# set up to catch exceptions.
error_results = _filter_error_results(
event_collector.collect_events_for_directory_with_filter(
self.TEST_DIR,
"NonExistentDecoratorTestCase.test"))

self.assertGreater(
len(error_results),
0,
"At least one job or test error result should have been returned")


def _filter_error_results(events):
# Filter out job result events.
return [
event
for event in events
if event.get("event", None) in ["job_result", "test_result"] and
event.get("status", None) == "error"
]


if __name__ == "__main__":
unittest.main()
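
As a quick sanity check of the helper above, _filter_error_results()
keeps only job_result/test_result events whose status is "error"
(events here are illustrative):

events = [
    {"event": "job_result", "status": "error"},
    {"event": "test_result", "status": "success"},
    {"event": "test_start"},  # hypothetical non-result event
]
assert _filter_error_results(events) == [
    {"event": "job_result", "status": "error"}]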
