Coverage for /opt/homebrew/lib/python3.11/site-packages/_pytest/skipping.py: 46% (162 statements)
1"""Support for skip/xfail functions and markers."""
2import os
3import platform
4import sys
5import traceback
6from collections.abc import Mapping
7from typing import Generator
8from typing import Optional
9from typing import Tuple
10from typing import Type
12import attr
14from _pytest.config import Config
15from _pytest.config import hookimpl
16from _pytest.config.argparsing import Parser
17from _pytest.mark.structures import Mark
18from _pytest.nodes import Item
19from _pytest.outcomes import fail
20from _pytest.outcomes import skip
21from _pytest.outcomes import xfail
22from _pytest.reports import BaseReport
23from _pytest.runner import CallInfo
24from _pytest.stash import StashKey


def pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="Report the results of xfail tests as if they were not marked",
    )

    parser.addini(
        "xfail_strict",
        "Default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
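
# Illustrative usage (not part of this module; the command line and ini file
# below are example sketches): the --runxfail flag and the xfail_strict ini
# value registered above are consumed like this:
#
#     pytest --runxfail            # report xfail-marked tests as if unmarked
#
#     # pytest.ini
#     [pytest]
#     xfail_strict = true          # unexpected passes become failures by default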


def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config.add_cleanup(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
    )
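
# Illustrative example (not part of this module; the test functions below are
# hypothetical): the markers documented above are applied in test code as
# decorators, e.g.
#
#     import sys
#     import pytest
#
#     @pytest.mark.skip(reason="no way of currently testing this")
#     def test_not_testable(): ...
#
#     @pytest.mark.skipif(sys.platform == "win32", reason="requires POSIX")
#     def test_posix_only(): ...
#
#     @pytest.mark.xfail(raises=ZeroDivisionError, strict=True)
#     def test_known_bug(): ...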


def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
    # String condition.
    if isinstance(condition, str):
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        for dictionary in reversed(
            item.ihook.pytest_markeval_namespace(config=item.config)
        ):
            if not isinstance(dictionary, Mapping):
                raise ValueError(
                    "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
                        dictionary
                    )
                )
            globals_.update(dictionary)
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
        try:
            filename = f"<{mark.name} condition>"
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition as a boolean" % mark.name,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # XXX better be checked at collection time
            msg = (
                "Error evaluating %r: " % mark.name
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason
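
# Illustrative example (not part of this module; the names below are
# hypothetical): a string condition is compiled and eval()'d against
# os/sys/platform/config plus any mappings contributed via the
# pytest_markeval_namespace hook, while a non-string condition is simply
# bool()'d and then requires an explicit reason:
#
#     # conftest.py
#     def pytest_markeval_namespace(config):
#         return {"DB_AVAILABLE": False}
#
#     @pytest.mark.skipif("not DB_AVAILABLE")            # string: reason derived
#     def test_db_query(): ...
#
#     @pytest.mark.skipif(sys.maxsize <= 2**32,
#                         reason="needs a 64-bit build")  # boolean: reason mandatory
#     def test_big_allocation(): ...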


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Skip:
    """The result of evaluate_skip_marks()."""

    reason: str = "unconditional skip"


def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        try:
            return Skip(*mark.args, **mark.kwargs)
        except TypeError as e:
            raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None

    return None
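
# Illustrative example (not part of this module; the markers below are
# hypothetical): multiple skipif conditions are OR'ed, and a plain skip mark is
# built directly from its arguments, so an unexpected keyword triggers the
# TypeError hint above:
#
#     @pytest.mark.skipif(sys.platform == "win32", sys.platform == "cygwin",
#                         reason="POSIX only")   # skips if either condition holds
#     def test_symlinks(): ...
#
#     @pytest.mark.skip(condition=True)          # raises TypeError with the
#     def test_oops(): ...                       # "maybe you meant pytest.mark.skipif?" hint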


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    reason: str
    run: bool
    strict: bool
    raises: Optional[Tuple[Type[BaseException], ...]]


def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None
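
# Illustrative example (not part of this module; the tests below are
# hypothetical): run, strict and raises default as above, with strict falling
# back to the xfail_strict ini value:
#
#     @pytest.mark.xfail(sys.platform == "darwin", reason="flaky on macOS",
#                        raises=TimeoutError, run=True, strict=False)
#     def test_network_retry(): ...
#
#     @pytest.mark.xfail(run=False, reason="crashes the interpreter")
#     def test_segfaults(): ...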


# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StashKey[Optional[Xfail]]()
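
# Illustrative sketch (assumption: a third-party hook, not part of this
# module): the cached evaluation can be read back from the same stash key,
# mirroring how the hooks below use it:
#
#     def pytest_runtest_teardown(item):
#         xfailed = item.stash.get(xfailed_key, None)
#         if xfailed is not None and not xfailed.run:
#             ...  # e.g. note that the test body was never executed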


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    skipped = evaluate_skip_marks(item)
    if skipped:
        raise skip.Exception(skipped.reason, _use_item_location=True)

    item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)


@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
    xfailed = item.stash.get(xfailed_key, None)
    if xfailed is None:
        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    yield

    # The test run may have added an xfail mark dynamically.
    xfailed = item.stash.get(xfailed_key, None)
    if xfailed is None:
        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
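
# Illustrative example (not part of this module; the helper below is
# hypothetical): the re-evaluation after yield covers tests that add an xfail
# marker while running, e.g.
#
#     def test_depends_on_service(request):
#         if not service_is_up():                     # hypothetical helper
#             request.node.add_marker(pytest.mark.xfail(reason="service down"))
#         ...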


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    outcome = yield
    rep = outcome.get_result()
    xfailed = item.stash.get(xfailed_key, None)
    if item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason
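
# Summary of the report rewriting above (descriptive comment, not original to
# the module):
#   - test raised pytest.xfail()            -> skipped, wasxfail set
#   - xfail-marked test failed as expected  -> skipped, wasxfail set
#     (unless it raised something outside `raises`, which stays a real failure)
#   - xfail-marked test passed, strict      -> failed, "[XPASS(strict)]"
#   - xfail-marked test passed, non-strict  -> passed, wasxfail set
#   - --runxfail                            -> reports left untouched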


def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"
    return None
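
# Illustrative note (not part of this module): each returned tuple is
# (category, progress letter, verbose word), so terminal output looks roughly
# like this:
#
#     test_known_bug.py x                              [100%]   # default
#     test_known_bug.py::test_known_bug XFAIL          [100%]   # with -v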