4 changes: 4 additions & 0 deletions src/_pytest/config/__init__.py
@@ -42,6 +42,7 @@
 from _pytest.outcomes import fail
 from _pytest.outcomes import Skipped
 from _pytest.pathlib import Path
+from _pytest.store import Store
 from _pytest.warning_types import PytestConfigWarning
 
 if TYPE_CHECKING:
@@ -791,6 +792,9 @@ def __init__(self, pluginmanager, *, invocation_params=None) -> None:
         self._override_ini = ()  # type: Sequence[str]
         self._opt2dest = {}  # type: Dict[str, str]
         self._cleanup = []  # type: List[Callable[[], None]]
+        # A place where plugins can store information on the config for their
+        # own use. Currently only intended for internal plugins.
+        self._store = Store()
         self.pluginmanager.register(self, "pytestconfig")
         self._configured = False
         self.hook.pytest_addoption.call_historic(
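
Note (not part of the diff): `_pytest/store.py` is added elsewhere in this PR and its source is not shown in this excerpt. Below is a minimal sketch of the API the import above appears to assume, inferred from how `Store` and `StoreKey` are used throughout these files; the real module may differ in detail.

from typing import Any, Dict, Generic, TypeVar, Union, cast

T = TypeVar("T")
D = TypeVar("D")


class StoreKey(Generic[T]):
    """A typed Store key: unique by object identity, carries value type T."""

    __slots__ = ()


class Store:
    """A type-safe heterogeneous mapping from StoreKey[T] to T."""

    __slots__ = ("_storage",)

    def __init__(self) -> None:
        self._storage = {}  # type: Dict[StoreKey[Any], object]

    def __setitem__(self, key: StoreKey[T], value: T) -> None:
        self._storage[key] = value

    def __getitem__(self, key: StoreKey[T]) -> T:
        # Raises KeyError if the key is absent, like a plain dict.
        return cast(T, self._storage[key])

    def __delitem__(self, key: StoreKey[T]) -> None:
        del self._storage[key]

    def __contains__(self, key: StoreKey[T]) -> bool:
        return key in self._storage

    def get(self, key: StoreKey[T], default: D) -> Union[T, D]:
        try:
            return self[key]
        except KeyError:
            return default

Because each key is a distinct object rather than a string attribute name, two plugins can never collide on a name, and a type checker can verify the type of every stored value.
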
15 changes: 10 additions & 5 deletions src/_pytest/faulthandler.py
@@ -1,8 +1,13 @@
 import io
 import os
 import sys
+from typing import TextIO
 
 import pytest
+from _pytest.store import StoreKey
 
 
+fault_handler_stderr_key = StoreKey[TextIO]()
+
+
 def pytest_addoption(parser):
@@ -46,8 +51,8 @@ def pytest_configure(self, config):
         import faulthandler
 
         stderr_fd_copy = os.dup(self._get_stderr_fileno())
-        config.fault_handler_stderr = os.fdopen(stderr_fd_copy, "w")
-        faulthandler.enable(file=config.fault_handler_stderr)
+        config._store[fault_handler_stderr_key] = open(stderr_fd_copy, "w")
+        faulthandler.enable(file=config._store[fault_handler_stderr_key])
 
     def pytest_unconfigure(self, config):
         import faulthandler
@@ -57,8 +62,8 @@ def pytest_unconfigure(self, config):
         # re-enable the faulthandler, attaching it to the default sys.stderr
         # so we can see crashes after pytest has finished, usually during
         # garbage collection during interpreter shutdown
-        config.fault_handler_stderr.close()
-        del config.fault_handler_stderr
+        config._store[fault_handler_stderr_key].close()
+        del config._store[fault_handler_stderr_key]
         faulthandler.enable(file=self._get_stderr_fileno())
 
     @staticmethod
@@ -78,7 +83,7 @@ def get_timeout_config_value(config):
     @pytest.hookimpl(hookwrapper=True)
     def pytest_runtest_protocol(self, item):
         timeout = self.get_timeout_config_value(item.config)
-        stderr = item.config.fault_handler_stderr
+        stderr = item.config._store[fault_handler_stderr_key]
         if timeout > 0 and stderr is not None:
             import faulthandler
 
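
Note (not part of the diff): the change above is the template for every file in this PR; an ad-hoc `config.<attr>` becomes a module-level typed key plus `config._store[...]` access. A hedged usage sketch, assuming the Store API sketched earlier; the key and file name here are hypothetical, not from the PR:

from typing import TextIO

from _pytest.store import StoreKey

# Module-private key: no other plugin can collide with it by accident.
log_file_key = StoreKey[TextIO]()  # hypothetical


def pytest_configure(config):
    # The stored value is typed: a checker knows this is a TextIO.
    config._store[log_file_key] = open("crash.log", "w")  # hypothetical path


def pytest_unconfigure(config):
    config._store[log_file_key].close()  # .close() type-checks against TextIO
    del config._store[log_file_key]
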
18 changes: 11 additions & 7 deletions src/_pytest/junitxml.py
@@ -22,9 +22,13 @@
 from _pytest import deprecated
 from _pytest import nodes
 from _pytest.config import filename_arg
+from _pytest.store import StoreKey
 from _pytest.warnings import _issue_warning_captured
 
 
+xml_key = StoreKey["LogXML"]()
+
+
 class Junit(py.xml.Namespace):
     pass
 
@@ -260,7 +264,7 @@ def _warn_incompatibility_with_xunit2(request, fixture_name):
     """Emits a PytestWarning about the given fixture being incompatible with newer xunit revisions"""
     from _pytest.warning_types import PytestWarning
 
-    xml = getattr(request.config, "_xml", None)
+    xml = request.config._store.get(xml_key, None)
     if xml is not None and xml.family not in ("xunit1", "legacy"):
         request.node.warn(
             PytestWarning(
@@ -312,7 +316,7 @@ def add_attr_noop(name, value):
 
     attr_func = add_attr_noop
 
-    xml = getattr(request.config, "_xml", None)
+    xml = request.config._store.get(xml_key, None)
     if xml is not None:
         node_reporter = xml.node_reporter(request.node.nodeid)
         attr_func = node_reporter.add_attribute
@@ -353,7 +357,7 @@ def record_func(name, value):
         __tracebackhide__ = True
         _check_record_param_type("name", name)
 
-    xml = getattr(request.config, "_xml", None)
+    xml = request.config._store.get(xml_key, None)
     if xml is not None:
         record_func = xml.add_global_property  # noqa
     return record_func
@@ -412,7 +416,7 @@ def pytest_configure(config):
     if not junit_family:
         _issue_warning_captured(deprecated.JUNIT_XML_DEFAULT_FAMILY, config.hook, 2)
         junit_family = "xunit1"
-    config._xml = LogXML(
+    config._store[xml_key] = LogXML(
         xmlpath,
         config.option.junitprefix,
         config.getini("junit_suite_name"),
@@ -421,13 +425,13 @@
         junit_family,
         config.getini("junit_log_passing_tests"),
     )
-    config.pluginmanager.register(config._xml)
+    config.pluginmanager.register(config._store[xml_key])
 
 
 def pytest_unconfigure(config):
-    xml = getattr(config, "_xml", None)
+    xml = config._store.get(xml_key, None)
     if xml:
-        del config._xml
+        del config._store[xml_key]
         config.pluginmanager.unregister(xml)
 
 
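
Note (not part of the diff): `xml_key = StoreKey["LogXML"]()` quotes the type because `LogXML` is defined further down the module; the string is a forward reference that only a static type checker resolves. The same idiom in isolation, with hypothetical names, as a sketch rather than pytest code:

from typing import Generic, TypeVar

T = TypeVar("T")


class Key(Generic[T]):
    pass


# "Widget" does not exist yet; at runtime the subscript is just a string,
# but a type checker resolves it once the class below is defined.
widget_key = Key["Widget"]()


class Widget:
    pass
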
11 changes: 9 additions & 2 deletions src/_pytest/mark/__init__.py
@@ -1,4 +1,6 @@
 """ generic mechanism for marking and selecting python functions. """
+from typing import Optional
+
 from .legacy import matchkeyword
 from .legacy import matchmark
 from .structures import EMPTY_PARAMETERSET_OPTION
@@ -8,12 +10,17 @@
 from .structures import MarkDecorator
 from .structures import MarkGenerator
 from .structures import ParameterSet
+from _pytest.config import Config
 from _pytest.config import hookimpl
 from _pytest.config import UsageError
+from _pytest.store import StoreKey
 
 __all__ = ["Mark", "MarkDecorator", "MarkGenerator", "get_empty_parameterset_mark"]
 
 
+old_mark_config_key = StoreKey[Optional[Config]]()
+
+
 def param(*values, **kw):
     """Specify a parameter in `pytest.mark.parametrize`_ calls or
     :ref:`parametrized fixtures <fixture-parametrize-marks>`.
@@ -145,7 +152,7 @@ def pytest_collection_modifyitems(items, config):
 
 
 def pytest_configure(config):
-    config._old_mark_config = MARK_GEN._config
+    config._store[old_mark_config_key] = MARK_GEN._config
     MARK_GEN._config = config
 
     empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
@@ -158,4 +165,4 @@ def pytest_configure(config):
 
 
 def pytest_unconfigure(config):
-    MARK_GEN._config = getattr(config, "_old_mark_config", None)
+    MARK_GEN._config = config._store.get(old_mark_config_key, None)
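
Note (not part of the diff): the key type is `Optional[Config]` because `MARK_GEN._config` is `None` until a config is installed; `pytest_configure` stashes the previous value (possibly `None`) and `pytest_unconfigure` restores it. The save/restore idiom reduced to its core, with hypothetical names, assuming the Store sketch above:

from typing import Optional

from _pytest.store import Store, StoreKey

_active_config = None  # type: Optional[str]  # stand-in for MARK_GEN._config
saved_config_key = StoreKey[Optional[str]]()  # hypothetical


def install(store: Store, new: str) -> None:
    global _active_config
    store[saved_config_key] = _active_config  # stashing None is legitimate
    _active_config = new


def uninstall(store: Store) -> None:
    global _active_config
    _active_config = store.get(saved_config_key, None)
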
5 changes: 5 additions & 0 deletions src/_pytest/nodes.py
@@ -29,6 +29,7 @@
 from _pytest.mark.structures import NodeKeywords
 from _pytest.outcomes import fail
 from _pytest.outcomes import Failed
+from _pytest.store import Store
 
 if TYPE_CHECKING:
     # Imported here due to circular import.
@@ -146,6 +147,10 @@ def __init__(
         if self.name != "()":
             self._nodeid += "::" + self.name
 
+        # A place where plugins can store information on the node for their
+        # own use. Currently only intended for internal plugins.
+        self._store = Store()
+
     @classmethod
     def from_parent(cls, parent: "Node", **kw):
         """
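
Note (not part of the diff): with this hunk every collected node carries its own `Store`, so per-item plugin state (as skipping.py uses below) is scoped to a single item instead of hanging off arbitrary attributes. A small hook-level sketch with a hypothetical key:

from _pytest.store import StoreKey

setup_done_key = StoreKey[bool]()  # hypothetical


def pytest_runtest_setup(item):
    # Scoped to this node: every other item has its own Store.
    item._store[setup_done_key] = True


def pytest_runtest_teardown(item):
    if setup_done_key in item._store:
        del item._store[setup_done_key]
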
20 changes: 13 additions & 7 deletions src/_pytest/pastebin.py
@@ -1,7 +1,12 @@
 """ submit failure or test session information to a pastebin service. """
 import tempfile
+from typing import IO
 
 import pytest
+from _pytest.store import StoreKey
 
 
+pastebinfile_key = StoreKey[IO[bytes]]()
+
+
 def pytest_addoption(parser):
@@ -26,25 +31,26 @@ def pytest_configure(config):
     # when using pytest-xdist, for example
     if tr is not None:
         # pastebin file will be utf-8 encoded binary file
-        config._pastebinfile = tempfile.TemporaryFile("w+b")
+        config._store[pastebinfile_key] = tempfile.TemporaryFile("w+b")
         oldwrite = tr._tw.write
 
         def tee_write(s, **kwargs):
             oldwrite(s, **kwargs)
             if isinstance(s, str):
                 s = s.encode("utf-8")
-            config._pastebinfile.write(s)
+            config._store[pastebinfile_key].write(s)
 
         tr._tw.write = tee_write
 
 
 def pytest_unconfigure(config):
-    if hasattr(config, "_pastebinfile"):
+    if pastebinfile_key in config._store:
+        pastebinfile = config._store[pastebinfile_key]
         # get terminal contents and delete file
-        config._pastebinfile.seek(0)
-        sessionlog = config._pastebinfile.read()
-        config._pastebinfile.close()
-        del config._pastebinfile
+        pastebinfile.seek(0)
+        sessionlog = pastebinfile.read()
+        pastebinfile.close()
+        del config._store[pastebinfile_key]
         # undo our patching in the terminal reporter
         tr = config.pluginmanager.getplugin("terminalreporter")
         del tr._tw.__dict__["write"]
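
Note (not part of the diff): two details above are worth calling out. The `hasattr` probe becomes a `key in config._store` membership test, and the key is typed `IO[bytes]` because `TemporaryFile("w+b")` is a binary file, which is why `tee_write` encodes `str` before writing. The tee itself, reduced to a standalone sketch with no pytest involved:

import tempfile
from typing import IO


buf = tempfile.TemporaryFile("w+b")  # type: IO[bytes]
oldwrite = print  # stand-in for the terminal writer's original write


def tee_write(s, **kwargs):
    oldwrite(s, **kwargs)
    if isinstance(s, str):
        s = s.encode("utf-8")  # the buffer is binary; encode text first
    buf.write(s)


tee_write("hello")
buf.seek(0)
assert buf.read() == b"hello"
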
13 changes: 9 additions & 4 deletions src/_pytest/resultlog.py
@@ -5,6 +5,11 @@
 
 import py
 
+from _pytest.store import StoreKey
+
+
+resultlog_key = StoreKey["ResultLog"]()
+
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "resultlog plugin options")
@@ -26,8 +31,8 @@ def pytest_configure(config):
     if not os.path.isdir(dirname):
         os.makedirs(dirname)
     logfile = open(resultlog, "w", 1)  # line buffered
-    config._resultlog = ResultLog(config, logfile)
-    config.pluginmanager.register(config._resultlog)
+    config._store[resultlog_key] = ResultLog(config, logfile)
+    config.pluginmanager.register(config._store[resultlog_key])
 
     from _pytest.deprecated import RESULT_LOG
     from _pytest.warnings import _issue_warning_captured
@@ -36,10 +41,10 @@
 
 
 def pytest_unconfigure(config):
-    resultlog = getattr(config, "_resultlog", None)
+    resultlog = config._store.get(resultlog_key, None)
     if resultlog:
         resultlog.logfile.close()
-        del config._resultlog
+        del config._store[resultlog_key]
         config.pluginmanager.unregister(resultlog)
 
 
34 changes: 20 additions & 14 deletions src/_pytest/skipping.py
@@ -4,6 +4,12 @@
 from _pytest.outcomes import fail
 from _pytest.outcomes import skip
 from _pytest.outcomes import xfail
+from _pytest.store import StoreKey
 
 
+skipped_by_mark_key = StoreKey[bool]()
+evalxfail_key = StoreKey[MarkEvaluator]()
+unexpectedsuccess_key = StoreKey[str]()
+
+
 def pytest_addoption(parser):
@@ -68,22 +74,22 @@ def nop(*args, **kwargs):
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
-    item._skipped_by_mark = False
+    item._store[skipped_by_mark_key] = False
     eval_skipif = MarkEvaluator(item, "skipif")
     if eval_skipif.istrue():
-        item._skipped_by_mark = True
+        item._store[skipped_by_mark_key] = True
         skip(eval_skipif.getexplanation())
 
     for skip_info in item.iter_markers(name="skip"):
-        item._skipped_by_mark = True
+        item._store[skipped_by_mark_key] = True
         if "reason" in skip_info.kwargs:
             skip(skip_info.kwargs["reason"])
         elif skip_info.args:
             skip(skip_info.args[0])
         else:
             skip("unconditional skip")
 
-    item._evalxfail = MarkEvaluator(item, "xfail")
+    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
     check_xfail_no_run(item)
 
 
@@ -99,20 +105,20 @@ def pytest_pyfunc_call(pyfuncitem):
 def check_xfail_no_run(item):
     """check xfail(run=False)"""
     if not item.config.option.runxfail:
-        evalxfail = item._evalxfail
+        evalxfail = item._store[evalxfail_key]
         if evalxfail.istrue():
             if not evalxfail.get("run", True):
                 xfail("[NOTRUN] " + evalxfail.getexplanation())
 
 
 def check_strict_xfail(pyfuncitem):
     """check xfail(strict=True) for the given PASSING test"""
-    evalxfail = pyfuncitem._evalxfail
+    evalxfail = pyfuncitem._store[evalxfail_key]
     if evalxfail.istrue():
         strict_default = pyfuncitem.config.getini("xfail_strict")
         is_strict_xfail = evalxfail.get("strict", strict_default)
         if is_strict_xfail:
-            del pyfuncitem._evalxfail
+            del pyfuncitem._store[evalxfail_key]
             explanation = evalxfail.getexplanation()
             fail("[XPASS(strict)] " + explanation, pytrace=False)
 
@@ -121,12 +127,12 @@ def check_strict_xfail(pyfuncitem):
 def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
-    evalxfail = getattr(item, "_evalxfail", None)
-    # unittest special case, see setting of _unexpectedsuccess
-    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
-
-        if item._unexpectedsuccess:
-            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
+    evalxfail = item._store.get(evalxfail_key, None)
+    # unittest special case, see setting of unexpectedsuccess_key
+    if unexpectedsuccess_key in item._store and rep.when == "call":
+        reason = item._store[unexpectedsuccess_key]
+        if reason:
+            rep.longrepr = "Unexpected success: {}".format(reason)
         else:
             rep.longrepr = "Unexpected success"
         rep.outcome = "failed"
@@ -154,7 +160,7 @@ def pytest_runtest_makereport(item, call):
                 rep.outcome = "passed"
                 rep.wasxfail = explanation
     elif (
-        getattr(item, "_skipped_by_mark", False)
+        item._store.get(skipped_by_mark_key, True)
         and rep.skipped
         and type(rep.longrepr) is tuple
    ):
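
Note (not part of the diff): the three keys replace three ad-hoc item attributes, each with its own value type, so misuse is now visible to a type checker. A minimal demonstration, assuming the Store sketch at the top of this page; these key names are hypothetical mirrors of the real ones above:

from _pytest.store import Store, StoreKey

skipped_key = StoreKey[bool]()  # hypothetical
reason_key = StoreKey[str]()    # hypothetical

store = Store()
store[skipped_key] = True
store[reason_key] = "expected to fail on CI"

flag = store[skipped_key]           # inferred type: bool
text = store.get(reason_key, "")    # inferred type: str
# store[skipped_key] = "yes"        # rejected by a checker: str is not bool
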