diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 34964e0..1e962ae 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -62,10 +62,14 @@ jobs:
           git clone --depth 1 https://github.com/w3c/json-ld-framing.git _json-ld-framing
           git clone --depth 1 https://github.com/json-ld/normalization.git _normalization
       - name: Test with Python=${{ matrix.python-version }} Loader=${{ matrix.loader }}
+        # run: |
+        #   python tests/runtests.py ./_json-ld-api/tests -l ${{ matrix.loader }}
+        #   python tests/runtests.py ./_json-ld-framing/tests -l ${{ matrix.loader }}
+        #   python tests/runtests.py ./_normalization/tests -l ${{ matrix.loader }}
         run: |
-          python tests/runtests.py ./_json-ld-api/tests -l ${{ matrix.loader }}
-          python tests/runtests.py ./_json-ld-framing/tests -l ${{ matrix.loader }}
-          python tests/runtests.py ./_normalization/tests -l ${{ matrix.loader }}
+          pytest --tests=./_json-ld-api/tests --loader=${{ matrix.loader }}
+          pytest --tests=./_json-ld-framing/tests --loader=${{ matrix.loader }}
+          pytest --tests=./_normalization/tests --loader=${{ matrix.loader }}
         env:
           LOADER: ${{ matrix.loader }}
         #coverage:
diff --git a/requirements-test.txt b/requirements-test.txt
index 3930480..aad120b 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1 +1,2 @@
 flake8
+pytest
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..f74f76a
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,153 @@
+import os
+import unittest
+import pytest
+
+# Import the existing test runner module so we can reuse the Manifest/Test
+# implementations with minimal changes.
+from . import runtests
+
+
+def pytest_addoption(parser):
+    # Use only long options for the pytest integration; pytest reserves
+    # lowercase single-letter short options for its own CLI flags.
+    parser.addoption('--tests', nargs='*', default=[], help='A manifest or directory to test')
+    parser.addoption('--earl', dest='earl', help='The filename to write an EARL report to')
+    parser.addoption('--loader', dest='loader', default='requests', help='The remote URL document loader: requests, aiohttp')
+    parser.addoption('--number', dest='number', help='Limit tests to those containing the specified test identifier')
+
+
+def pytest_configure(config):
+    # Apply the loader choice and selected test number globally so that the
+    # existing `runtests` helpers behave the same as the CLI runner.
+    loader = config.getoption('loader')
+    if loader == 'requests':
+        runtests.jsonld._default_document_loader = runtests.jsonld.requests_document_loader()
+    elif loader == 'aiohttp':
+        runtests.jsonld._default_document_loader = runtests.jsonld.aiohttp_document_loader()
+
+    number = config.getoption('number')
+    if number:
+        runtests.ONLY_IDENTIFIER = number
+
+    # If an EARL output file was requested, create a session-level
+    # EarlReport instance that we will populate per test.
+    earl_fn = config.getoption('earl')
+    if earl_fn:
+        config._earl_report = runtests.EarlReport()
+    else:
+        config._earl_report = None
+
+
+def _flatten_suite(suite):
+    """Yield TestCase instances from a unittest TestSuite (recursively)."""
+    if isinstance(suite, unittest.TestSuite):
+        for s in suite:
+            yield from _flatten_suite(s)
+    elif isinstance(suite, unittest.TestCase):
+        yield suite
+
+
+def pytest_generate_tests(metafunc):
+    # Parametrize tests using the existing manifest loader if the test
+    # function needs a `manifest_test` argument.
+    if 'manifest_test' not in metafunc.fixturenames:
+        return
+
+    config = metafunc.config
+    tests_arg = config.getoption('tests') or []
+
+    if len(tests_arg):
+        test_targets = tests_arg
+    else:
+        # Default sibling directories used by the original runner
+        sibling_dirs = [
+            '../specifications/json-ld-api/tests/',
+            '../specifications/json-ld-framing/tests/',
+            '../specifications/normalization/tests/',
+        ]
+        test_targets = []
+        for d in sibling_dirs:
+            if os.path.exists(d):
+                test_targets.append(d)
+
+    if len(test_targets) == 0:
+        pytest.skip('No test manifest or directory specified (use --tests)')
+
+    # Build a root manifest structure with target files and dirs (equivalent to the original runner).
+    root_manifest = {
+        '@context': 'https://w3c.github.io/tests/context.jsonld',
+        '@id': '',
+        '@type': 'mf:Manifest',
+        'description': 'Top level PyLD test manifest',
+        'name': 'PyLD',
+        'sequence': [],
+        'filename': '/'
+    }
+
+    for test in test_targets:
+        if os.path.isfile(test):
+            root, ext = os.path.splitext(test)
+            if ext in ['.json', '.jsonld']:
+                root_manifest['sequence'].append(os.path.abspath(test))
+            else:
+                raise Exception('Unknown test file ext', root, ext)
+        elif os.path.isdir(test):
+            filename = os.path.join(test, 'manifest.jsonld')
+            if os.path.exists(filename):
+                root_manifest['sequence'].append(os.path.abspath(filename))
+
+    # Use the existing Manifest loader to create a TestSuite and flatten it
+    suite = runtests.Manifest(root_manifest, root_manifest['filename']).load()
+    tests = list(_flatten_suite(suite))
+
+    # Parametrize the test function with Test instances and use their
+    # string representation as test ids for readability in pytest output.
+    metafunc.parametrize('manifest_test', tests, ids=[str(t) for t in tests])
+
+
+@pytest.hookimpl(hookwrapper=True, tryfirst=True)
+def pytest_runtest_makereport(item):
+    # Hookwrapper gives us the final test report via `outcome.get_result()`.
+    outcome = yield
+    rep = outcome.get_result()
+
+    # Only handle the main call phase, matching the behaviour of the
+    # original runner, which only reported passes and failures/errors.
+    if rep.when != 'call':
+        return
+
+    # The parametrized pytest test attaches the original runtests.Test
+    # instance as the `manifest_test` fixture; retrieve it here.
+    manifest_test = item.funcargs.get('manifest_test')
+    if manifest_test is None:
+        return
+
+    # If an EARL report was requested at configure time, add an assertion
+    # for this test based on the pytest outcome.
+    earl_report = getattr(item.config, '_earl_report', None)
+    if earl_report is None:
+        return
+
+    # Map pytest outcomes to whether the test should be recorded as
+    # succeeded or failed. Skip 'skipped' outcomes to avoid polluting
+    # the EARL report with non-asserted tests.
+    if rep.outcome == 'skipped':
+        return
+
+    success = (rep.outcome == 'passed')
+    try:
+        earl_report.add_assertion(manifest_test, success)
+    except Exception:
+        # Don't let EARL bookkeeping break test execution; be quiet on error.
+        pass
+
+
+def pytest_sessionfinish(session, exitstatus):
+    # If the user requested an EARL report, write it using the existing
+    # `EarlReport` helper. The per-test assertions (if any) were added to
+    # config._earl_report during test execution; write the report now if
+    # present.
+    earl = session.config.getoption('earl')
+    earl_report = getattr(session.config, '_earl_report', None)
+    if earl and earl_report is not None:
+        earl_report.write(os.path.abspath(earl))
diff --git a/tests/runtests.py b/tests/runtests.py
index 9d2567e..6ef78fc 100644
--- a/tests/runtests.py
+++ b/tests/runtests.py
@@ -1,14 +1,59 @@
 #!/usr/bin/env python
 """
-Test runner for JSON-LD.
+Test runner for the JSON-LD test-suite.
+
+This module provides a small command-line test harness used to execute the
+JSON-LD test manifests that accompany the project and to produce an EARL
+(Evaluation and Report Language) report summarizing results.
+
+Behavior and features
+- Loads one or more JSON-LD test manifest files (or directories containing a
+  `manifest.jsonld`) and constructs a `unittest.TestSuite` from the entries.
+- Supports running tests from the local test directories bundled with the
+  repository or from manifests passed on the command line.
+- Uses the `pyld` library under test to run each test case (expand, compact,
+  frame, normalize, to_rdf, etc.), then compares results against expected
+  outputs. Comparison is order-insensitive where appropriate.
+- Can write an EARL report using `--earl FILE` for CI / interoperability
+  with W3C testing tools.
+
+Usage
+- Run the script directly: `python tests/runtests.py [MANIFEST_OR_DIR ...]`
+- Common options (see `-h` for the full list):
+  - `-e, --earl FILE` : write an EARL report to FILE
+  - `-b, --bail` : stop at the first failing test
+  - `-l, --loader` : choose the network loader (`requests` or `aiohttp`)
+  - `-n, --number` : focus on tests containing the given test identifier
+  - `-v, --verbose` : print verbose test data
+
+Key classes and functions
+- `TestRunner`: command-line entrypoint; builds the root manifest and runs the
+  test suite.
+- `Manifest`: loads a manifest document and converts its entries into
+  `unittest.TestSuite` / `Test` instances.
+- `Test` (subclass of `unittest.TestCase`): encapsulates execution and
+  verification logic for a single JSON-LD test case.
+- Utility helpers: `read_json`, `read_file`, `create_document_loader`, and
+  `equalUnordered` (used for order-insensitive comparisons).
 
 .. module:: runtests
-  :synopsis: Test harness for pyld
+    :synopsis: Test harness for pyld
 
 .. moduleauthor:: Dave Longley
 .. moduleauthor:: Olaf Conradi
 """
+# TODO: The code below contains a small CLI test-runner (`TestRunner`), a
+# unittest-based `EarlTestResult`, and the `__main__` entrypoint. This file
+# is still kept to reuse `Manifest`, `Test` and `EarlReport` from the
+# original runner, but when running under `pytest` the separate
+# `tests/conftest.py` + `tests/test_manifests.py` integration is used to
+# drive tests. The `TestRunner` and `EarlTestResult` blocks can be removed
+# (or converted to a separate backward-compatibility script) once the
+# pytest migration is complete.
+# Also, the module docstring can be updated to reflect the pytest-based
+# testing approach once the legacy runner is removed.
+
 import datetime
 import json
 import os
@@ -16,6 +61,10 @@
 import traceback
 import unittest
 import re
+# NOTE: ArgumentParser and TextTestResult were used by the original
+# TestRunner / EarlTestResult classes. They are obsolete because
+# pytest now provides the test harness; these imports can be removed
+# once the legacy CLI runner is deleted.
 from argparse import ArgumentParser
 from unittest import TextTestResult
@@ -29,12 +78,22 @@
 SKIP_TESTS = []
 ONLY_IDENTIFIER = None
 
+# `LOCAL_BASES` lists the remote bases used by the official JSON-LD test
+# repositories. When a test refers to a URL starting with one of these
+# bases, the runner attempts to map that URL to a local file in the
+# test-suite tree (when possible) so tests can be run offline.
+
+
 LOCAL_BASES = [
     'https://w3c.github.io/json-ld-api/tests',
     'https://w3c.github.io/json-ld-framing/tests',
     'https://github.com/json-ld/normalization/tests'
 ]
 
+# NOTE: The TestRunner class below is obsolete now that pytest provides the
+# test harness; it can be removed once the legacy CLI runner is deleted.
+
 class TestRunner(unittest.TextTestRunner):
     """
     Loads test manifests and runs tests.
@@ -44,7 +103,9 @@ def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1):
         unittest.TextTestRunner.__init__(
             self, stream, descriptions, verbosity)
 
-        # command line args
+        # The runner uses an ArgumentParser to accept a list of manifests or
+        # test directories and several runner-specific flags (e.g. which
+        # document loader to use or whether to bail on failure).
         self.options = {}
         self.parser = ArgumentParser()
 
@@ -53,7 +114,7 @@ def _makeResult(self):
 
     def main(self):
         print('PyLD Tests')
-        print('Use -h or --help to view options.\n')
+        print('Use -h or --help to view options.\\n')
 
         # add program options
         self.parser.add_argument('tests', metavar='TEST', nargs='*',
@@ -82,6 +143,11 @@ def main(self):
         elif self.options.loader == 'aiohttp':
             jsonld._default_document_loader = jsonld.aiohttp_document_loader()
 
+        # The document loader drives how remote HTTP documents are fetched.
+        # Tests can choose to run using the 'requests'-based loader or the
+        # 'aiohttp' async loader; here we select the default based on the
+        # CLI option.
+
         # config runner
         self.failfast = self.options.bail
@@ -143,6 +209,9 @@ def main(self):
         global ROOT_MANIFEST_DIR
         #ROOT_MANIFEST_DIR = os.path.dirname(root_manifest['filename'])
         ROOT_MANIFEST_DIR = root_manifest['filename']
+        # Build a Manifest object from the root manifest structure. The
+        # Manifest will recursively load manifests and produce a
+        # `unittest.TestSuite` containing all discovered tests.
         suite = Manifest(root_manifest, root_manifest['filename']).load()
 
         # run tests
@@ -189,16 +258,29 @@ def load(self):
             if is_jsonld_type(entry, 'mf:Manifest'):
                 self.suite = unittest.TestSuite(
                     [self.suite, Manifest(entry, filename).load()])
+                # If the entry is itself a manifest, recurse into it and
+                # append its TestSuite. This mirrors the structure of the
+                # W3C test manifests, where manifests can include other
+                # manifests via 'entries' or 'sequence'.
             # don't add tests that are not focused
             # assume entry is a test
            elif not ONLY_IDENTIFIER or ONLY_IDENTIFIER in entry['@id']:
                 self.suite.addTest(Test(self, entry, filename))
+                # For simple test entries we construct a `Test` object that
+                # wraps the execution and assertion logic for that test case.
+
         return self.suite
 
 
 class Test(unittest.TestCase):
+    """
+    A Test instance stores the manifest and test description (as
+    loaded from the JSON-LD manifest). The boolean flags below are
+    used to distinguish positive/negative/syntax tests so the
+    runner knows whether an exception is the expected outcome.
+    """
     def __init__(self, manifest, data, filename):
         unittest.TestCase.__init__(self)
         #self.maxDiff = None
@@ -263,6 +345,10 @@ def setUp(self):
                 os.path.basename(str.replace(manifest.filename, '.jsonld', '')) + data['@id'])
             self.base = self.manifest.data['baseIri'] + data['input']
+            # When manifests define a `baseIri`, the runner patches the test
+            # `@id` and computes a `base` URL used by the document loader so
+            # relative references are resolved consistently during testing.
+
         # skip based on id regular expression
         skip_id_re = test_info.get('skip', {}).get('idRegex', [])
         for regex in skip_id_re:
@@ -300,6 +386,11 @@ def setUp(self):
             if re.match(regex, data.get('@id', data.get('id', ''))):
                 data['runLocal'] = True
+        # Tests listed under 'runLocal' are forced to use the local file
+        # loader variant rather than fetching remote URLs. This is useful
+        # for reproducing the official test-suite behavior without network
+        # access.
+
     def runTest(self):
         data = self.data
         global TEST_TYPES
@@ -315,8 +406,20 @@ def runTest(self):
         else:
             expect = read_test_property(self._get_expect_property())(self)
 
+        # The following try/except handles three primary scenarios:
+        # - Positive tests: compute the result and compare to expected JSON
+        #   (order-insensitive where appropriate).
+        # - Negative tests: assert that the library raises the expected
+        #   JSON-LD error code.
+        # - Pending tests: tests expected to fail are marked 'pending' and
+        #   their unexpected success is reported specially.
+
         try:
             result = getattr(jsonld, fn)(*params)
+            # Invoke the tested pyld function (e.g. `expand`, `compact`,
+            # `normalize`). `fn` is the function name and `params` is a
+            # list of callables that are invoked with `self` to produce
+            # the actual arguments (this allows lazy file loading).
             if self.is_negative and not self.pending:
                 raise AssertionError('Expected an error; one was not raised')
             if self.is_syntax and not self.pending:
@@ -382,6 +485,11 @@ def runTest(self):
 
 # Compare values with order-insensitive array tests
 def equalUnordered(result, expect):
+    """
+    `equalUnordered` implements a simple structural equivalence check that
+    ignores ordering in lists. It is used to compare JSON-LD results where
+    arrays are considered unordered by the test-suite semantics.
+    """
     if isinstance(result, list) and isinstance(expect, list):
         return(len(result) == len(expect) and
             all(any(equalUnordered(v1, v2) for v2 in expect) for v1 in result))
@@ -391,6 +499,8 @@ def equalUnordered(result, expect):
     else:
         return(result == expect)
 
+
+
 def is_jsonld_type(node, type_):
     node_types = []
     node_types.extend(get_jsonld_values(node, '@type'))
@@ -400,6 +510,20 @@
 def get_jsonld_values(node, property):
+    """
+    Safely extract a (possibly multi-valued) property from a JSON-LD node.
+
+    The JSON-LD manifests sometimes use single values or lists for the same
+    properties. This helper returns a list in either case so callers can
+    uniformly iterate over the returned value.
+
+    Args:
+        node: dict-like JSON-LD node.
+        property: property name to extract (string).
+
+    Returns:
+        A list of values for the property (empty list if property missing).
+ """ rval = [] if property in node: rval = node[property] @@ -409,6 +533,13 @@ def get_jsonld_values(node, property): def get_jsonld_error_code(err): + """ + Walk a JsonLdError chain to extract the most specific error `code`. + + Many pyld error types wrap a cause. This helper attempts to return the + structured `code` attribute from a `jsonld.JsonLdError` (if present), + otherwise it falls back to stringifying the exception. + """ if isinstance(err, jsonld.JsonLdError): if err.code: return err.code @@ -418,11 +549,22 @@ def get_jsonld_error_code(err): def read_json(filename): + """Read and parse a JSON file from `filename`. + + Returns the parsed Python object. + """ with open(filename) as f: return json.load(f) def read_file(filename): + """Read a file and return its contents as text. + + This wrapper ensures consistent text handling across Python 2/3 by + decoding bytes for older Python versions. In the current project we + expect Python 3, but the compatibility guard is kept to match the + original test-runner behavior. + """ with open(filename) as f: if sys.version_info[0] >= 3: return f.read() @@ -431,6 +573,14 @@ def read_file(filename): def read_test_url(property): + """ + Return a callable that reads a URL-like property from a test entry. + + Some test entries store input locations as relative paths resolved + against the manifest's `baseIri`. This factory returns a function that + accepts a `Test` instance and returns the fully-resolved URL (or + `None` if the property is missing). + """ def read(test): if property not in test.data: return None @@ -442,6 +592,15 @@ def read(test): def read_test_property(property): + """ + Return a callable that reads a test-local property and returns either + parsed JSON (for `.jsonld`) or raw text. + + The returned function accepts a `Test` instance and resolves the + filename relative to the test's directory. If the file ends with + `.jsonld` it is parsed as JSON; otherwise the raw file contents are + returned. + """ def read(test): if property not in test.data: return None @@ -454,6 +613,15 @@ def read(test): def create_test_options(opts=None): + """ + Factory returning a function that builds options for a pyld API call. + + The returned callable accepts a `Test` instance and produces the + options dictionary consumed by functions such as `expand`/`compact`. + It merges explicit test `option` values with any additional `opts` + passed to the factory, wires in the test-specific `documentLoader`, + and resolves `expandContext` files when present. + """ def create(test): http_options = ['contentType', 'httpLink', 'httpStatus', 'redirectTo'] test_options = test.data.get('option', {}) @@ -471,8 +639,18 @@ def create(test): def create_document_loader(test): + """ + create_document_loader returns a callable compatible with the JSON-LD + API's document loader interface. The returned `local_loader` will + decide whether to load a URL from the local test-tree (mapping + `LOCAL_BASES` to local files) or delegate to the normal network + loader. This enables deterministic tests without requiring network + access for suite-hosted resources. + """ loader = jsonld.get_document_loader() + + def is_test_suite_url(url): return any(url.startswith(base) for base in LOCAL_BASES) @@ -557,8 +735,18 @@ def local_loader(url, headers): return local_loader - +# NOTE: The EarlTestResult class can be removed because pytest now +# provides the test harness; this class can be removed once the legacy +# CLI runner is deleted. 
 class EarlTestResult(TextTestResult):
+    """
+    A `TextTestResult` subclass that records EARL assertions as tests run.
+
+    This result object forwards normal test outcome bookkeeping to the
+    base `TextTestResult` and additionally records each assertion in an
+    `EarlReport` instance so a machine-readable report can be emitted at
+    the end of a test run.
+    """
     def __init__(self, stream, descriptions, verbosity):
         TextTestResult.__init__(self, stream, descriptions, verbosity)
         self.report = EarlReport()
@@ -585,11 +773,16 @@ class EarlReport():
     """
 
     def __init__(self):
+        # Load package metadata (version) from the library's __about__.py
         about = {}
         with open(os.path.join(
                 os.path.dirname(__file__), '..', 'lib', 'pyld', '__about__.py')) as fp:
             exec(fp.read(), about)
+        # Timestamp used for test results
         self.now = datetime.datetime.utcnow().replace(microsecond=0)
+        # Build the base EARL report structure. The report is a JSON-LD
+        # document describing the project and the assertions made about
+        # test outcomes.
         self.report = {
             '@context': {
                 'doap': 'http://usefulinc.com/ns/doap#',
@@ -640,6 +833,8 @@ def __init__(self):
         }
 
     def add_assertion(self, test, success):
+        # Append an EARL assertion describing a single test outcome. The
+        # `earl:outcome` is either `earl:passed` or `earl:failed`.
         self.report['subjectOf'].append({
             '@type': 'earl:Assertion',
             'earl:assertedBy': self.report['doap:developer']['@id'],
@@ -654,6 +849,7 @@ def add_assertion(self, test, success):
         return self
 
     def write(self, filename):
+        # Serialize the EARL report as pretty-printed JSON-LD.
         with open(filename, 'w') as f:
             f.write(json.dumps(self.report, indent=2))
             f.close()
@@ -880,5 +1076,8 @@ def write(self, filename):
 }
 
 
+# NOTE: The legacy command-line entrypoint is kept here for reference now
+# that pytest runs the test-suite; it can be removed once the pytest
+# migration is finalized.
 if __name__ == '__main__':
     TestRunner(verbosity=2).main()
diff --git a/tests/test_manifests.py b/tests/test_manifests.py
new file mode 100644
index 0000000..46382be
--- /dev/null
+++ b/tests/test_manifests.py
@@ -0,0 +1,29 @@
+import pytest
+import unittest
+
+# Reuse the existing Test wrapper from `runtests.py`. The pytest test
+# simply calls `setUp()` and `runTest()` on the original Test instance
+# so that all existing behavior and comparison logic remains unchanged.
+
+
+def _call_test(testcase):
+    """Call setUp and runTest on a `runtests.Test` instance.
+
+    Convert unittest.SkipTest to pytest.skip so pytest reports skipped
+    tests correctly.
+    """
+    try:
+        testcase.setUp()
+    except unittest.SkipTest as e:
+        pytest.skip(str(e))
+
+    try:
+        testcase.runTest()
+    except unittest.SkipTest as e:
+        pytest.skip(str(e))
+
+
+def test_manifest_case(manifest_test):
+    # manifest_test is a `runtests.Test` instance provided by the
+    # parametrization implemented in `conftest.py`.
+    _call_test(manifest_test)
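
For reference, a minimal sketch of invoking the pytest integration added above outside CI, mirroring the workflow step. The `_json-ld-api` clone location, the `report.jsonld` output name and the `t0001` identifier are illustrative assumptions, not part of the patch:

    # clone one of the official test suites next to the checkout, as the CI job does
    git clone --depth 1 https://github.com/w3c/json-ld-api.git _json-ld-api
    # run it through the pytest harness defined in tests/conftest.py,
    # optionally writing an EARL report
    pytest --tests=./_json-ld-api/tests --loader=requests --earl=report.jsonld
    # limit the run to tests whose id contains a given (hypothetical) identifier
    pytest --tests=./_json-ld-api/tests --number=t0001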