10 changes: 7 additions & 3 deletions .github/workflows/main.yaml
@@ -62,10 +62,14 @@ jobs:
git clone --depth 1 https://github.com/w3c/json-ld-framing.git _json-ld-framing
git clone --depth 1 https://github.com/json-ld/normalization.git _normalization
- name: Test with Python=${{ matrix.python-version }} Loader=${{ matrix.loader }}
# run: |
# python tests/runtests.py ./_json-ld-api/tests -l ${{ matrix.loader }}
# python tests/runtests.py ./_json-ld-framing/tests -l ${{ matrix.loader }}
# python tests/runtests.py ./_normalization/tests -l ${{ matrix.loader }}
run: |
python tests/runtests.py ./_json-ld-api/tests -l ${{ matrix.loader }}
python tests/runtests.py ./_json-ld-framing/tests -l ${{ matrix.loader }}
python tests/runtests.py ./_normalization/tests -l ${{ matrix.loader }}
pytest --tests=./_json-ld-api/tests --loader=${{ matrix.loader }}
pytest --tests=./_json-ld-framing/tests --loader=${{ matrix.loader }}
pytest --tests=./_normalization/tests --loader=${{ matrix.loader }}
env:
LOADER: ${{ matrix.loader }}
#coverage:
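The CI step now drives the suite through pytest using the --tests and --loader options registered in tests/conftest.py below. For reference, a minimal local sketch of the same invocation via pytest.main (the file name local_run.py and the choice of the requests loader are illustrative assumptions, not part of this PR):

# local_run.py -- hypothetical helper, not part of this PR; a sketch of
# invoking the new pytest entry point with the options added in conftest.py.
import sys

import pytest

if __name__ == '__main__':
    # Equivalent to: pytest --tests=./_json-ld-api/tests --loader=requests
    sys.exit(pytest.main([
        '--tests=./_json-ld-api/tests',
        '--loader=requests',
    ]))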
1 change: 1 addition & 0 deletions requirements-test.txt
@@ -1 +1,2 @@
flake8
pytest
Empty file added tests/__init__.py
153 changes: 153 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,153 @@
import os
import unittest
import pytest

# Import the existing test runner module so we can reuse Manifest/Test
# implementations with minimal changes.
from . import runtests


def pytest_addoption(parser):
    # Use only long options for the pytest integration; pytest reserves
    # single-letter short options for its own CLI flags.
parser.addoption('--tests', nargs='*', default=[], help='A manifest or directory to test')
parser.addoption('--earl', dest='earl', help='The filename to write an EARL report to')
parser.addoption('--loader', dest='loader', default='requests', help='The remote URL document loader: requests, aiohttp')
parser.addoption('--number', dest='number', help='Limit tests to those containing the specified test identifier')


def pytest_configure(config):
# Apply loader choice and selected test number globally so that the
# existing `runtests` helpers behave the same as the CLI runner.
loader = config.getoption('loader')
if loader == 'requests':
runtests.jsonld._default_document_loader = runtests.jsonld.requests_document_loader()
elif loader == 'aiohttp':
runtests.jsonld._default_document_loader = runtests.jsonld.aiohttp_document_loader()

number = config.getoption('number')
if number:
runtests.ONLY_IDENTIFIER = number
# If an EARL output file was requested, create a session-level
# EarlReport instance we will populate per-test.
earl_fn = config.getoption('earl')
if earl_fn:
config._earl_report = runtests.EarlReport()
else:
config._earl_report = None


def _flatten_suite(suite):
"""Yield TestCase instances from a unittest TestSuite (recursively)."""
if isinstance(suite, unittest.TestSuite):
for s in suite:
yield from _flatten_suite(s)
elif isinstance(suite, unittest.TestCase):
yield suite


def pytest_generate_tests(metafunc):
# Parametrize tests using the existing manifest loader if the test
# function needs a `manifest_test` argument.
if 'manifest_test' not in metafunc.fixturenames:
return

config = metafunc.config
tests_arg = config.getoption('tests') or []

    if tests_arg:
test_targets = tests_arg
else:
# Default sibling directories used by the original runner
sibling_dirs = [
'../specifications/json-ld-api/tests/',
'../specifications/json-ld-framing/tests/',
'../specifications/normalization/tests/',
]
test_targets = []
for d in sibling_dirs:
if os.path.exists(d):
test_targets.append(d)

    if not test_targets:
pytest.skip('No test manifest or directory specified (use --tests)')

# Build a root manifest structure with target files and dirs (equivalent to the original runner).
root_manifest = {
'@context': 'https://w3c.github.io/tests/context.jsonld',
'@id': '',
'@type': 'mf:Manifest',
'description': 'Top level PyLD test manifest',
'name': 'PyLD',
'sequence': [],
'filename': '/'
}

for test in test_targets:
if os.path.isfile(test):
root, ext = os.path.splitext(test)
if ext in ['.json', '.jsonld']:
root_manifest['sequence'].append(os.path.abspath(test))
else:
raise Exception('Unknown test file ext', root, ext)
elif os.path.isdir(test):
filename = os.path.join(test, 'manifest.jsonld')
if os.path.exists(filename):
root_manifest['sequence'].append(os.path.abspath(filename))

# Use the existing Manifest loader to create a TestSuite and flatten it
suite = runtests.Manifest(root_manifest, root_manifest['filename']).load()
tests = list(_flatten_suite(suite))

# Parametrize the test function with Test instances and use their
# string representation as test ids for readability in pytest output.
metafunc.parametrize('manifest_test', tests, ids=[str(t) for t in tests])


@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item):
# Hookwrapper gives us the final test report via `outcome.get_result()`.
outcome = yield
rep = outcome.get_result()

# We only handle the main call phase to match
# the behaviour of the original runner which only reported passes
# and failures/errors.
    if rep.when != 'call':
return

# The parametrized pytest test attaches the original runtests.Test
# instance as the `manifest_test` fixture; retrieve it here.
manifest_test = item.funcargs.get('manifest_test')
if manifest_test is None:
return

# If an EARL report was requested at configure time, add an assertion
# for this test based on the pytest outcome.
earl_report = getattr(item.config, '_earl_report', None)
if earl_report is None:
return

# Map pytest outcomes to whether the test should be recorded as
# succeeded or failed. We skip 'skipped' outcomes to avoid polluting
# the EARL report with non-asserted tests.
if rep.outcome == 'skipped':
return

success = (rep.outcome == 'passed')
try:
earl_report.add_assertion(manifest_test, success)
except Exception:
# Don't let EARL bookkeeping break test execution; be quiet on error.
pass


def pytest_sessionfinish(session, exitstatus):
    # If the user requested an EARL report, write it using the existing
    # `EarlReport` helper. The per-test assertions (if any) were appended to
    # config._earl_report during test execution; write the report now.
earl = session.config.getoption('earl')
earl_report = getattr(session.config, '_earl_report', None)
if earl and earl_report is not None:
earl_report.write(os.path.abspath(earl))
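The conftest above only parametrizes the manifest_test fixture; the test function that consumes it is not shown in this section. A minimal sketch of such a consumer, assuming each parameter is the unittest.TestCase produced by runtests.Manifest (the file name test_manifest.py and the TestResult handling are illustrative, not taken from the PR):

# tests/test_manifest.py -- hypothetical consumer of the `manifest_test`
# fixture parametrized in conftest.py. Each parameter is assumed to be a
# unittest.TestCase built by runtests.Manifest, so run it directly and
# surface any failure, error, or skip through pytest.
import unittest

import pytest


def test_manifest(manifest_test):
    result = unittest.TestResult()
    manifest_test.run(result)
    if result.skipped:
        pytest.skip(result.skipped[0][1])
    for _test, traceback_text in result.failures + result.errors:
        raise AssertionError(traceback_text)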