/
plugin.py
189 lines (154 loc) · 5.83 KB
/
plugin.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
#!/usr/bin/env python3
# Copyright (C) 2019 tribe29 GmbH - License: GNU General Public License v2
# This file is part of Checkmk (https://checkmk.com). It is subject to the terms and
# conditions defined in the file COPYING, which is part of this source code package.
"""some fixtures
the pytest-playwright addon's fixtures are too "aggressive" and are loaded in
all tests. So some functionality is inspierd from this module
See: https://github.com/microsoft/playwright-pytest
"""
import logging
import os
import typing as t
import pytest
from playwright.sync_api import (
Browser,
BrowserContext,
BrowserType,
Error,
Page,
Playwright,
sync_playwright,
)
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session", name="browser_type_launch_args")
def _browser_type_launch_args(pytestconfig: t.Any) -> dict:
launch_options = {}
headed_option = pytestconfig.getoption("--headed")
if headed_option:
launch_options["headless"] = False
slowmo_option = pytestconfig.getoption("--slowmo")
if slowmo_option:
launch_options["slow_mo"] = slowmo_option
return launch_options
def _build_artifact_test_folder(
    pytestconfig: t.Any, request: pytest.FixtureRequest, folder_or_file_name: str
) -> str:
    """Return the artifact path for this test, rooted at the ``--output`` dir.

    The test's node id becomes a sub-directory, keeping artifacts of
    different tests apart.
    """
    artifact_root = pytestconfig.getoption("--output")
    return os.path.join(artifact_root, request.node.nodeid, folder_or_file_name)
@pytest.fixture(scope="session", name="playwright")
def _playwright() -> t.Generator[Playwright, None, None]:
pw = sync_playwright().start()
yield pw
pw.stop()
@pytest.fixture(scope="session", name="browser_type")
def _browser_type(playwright: Playwright, browser_name: str) -> BrowserType:
return t.cast(BrowserType, getattr(playwright, browser_name))
@pytest.fixture(scope="session", name="browser")
def _browser(
browser_type: BrowserType, browser_type_launch_args: dict
) -> t.Generator[Browser, None, None]:
browser = browser_type.launch(**browser_type_launch_args)
yield browser
browser.close()
@pytest.fixture(name="context")
def _context(
browser: Browser,
pytestconfig: t.Any,
request: pytest.FixtureRequest,
) -> t.Generator[BrowserContext, None, None]:
pages: t.List[Page] = []
context = browser.new_context()
context.on("page", lambda page: pages.append(page)) # pylint: disable=unnecessary-lambda
yield context
try:
_may_create_screenshot(request, pytestconfig, pages)
finally:
context.close()
def _may_create_screenshot(
    request: pytest.FixtureRequest,
    pytestconfig: t.Any,
    pages: t.List[Page],
) -> None:
    """Capture a screenshot of the single open page, if configured to do so.

    A screenshot is taken when ``--screenshot=on``, or when the test failed
    and ``--screenshot=only-on-failure``. Missing result information on the
    test item is treated as a failure.
    """
    if hasattr(request.node, "rep_call"):
        failed = request.node.rep_call.failed
    else:
        failed = True
    screenshot_option = pytestconfig.getoption("--screenshot")
    if screenshot_option != "on" and not (failed and screenshot_option == "only-on-failure"):
        return
    # At the moment we're only using one page.
    # Extend this here as soon we have a use case for multiple pages
    assert len(pages) == 1
    page = pages[0]
    status = "failed" if failed else "finished"
    screenshot_path = _build_artifact_test_folder(pytestconfig, request, f"test-{status}.png")
    try:
        page.screenshot(timeout=5000, path=screenshot_path)
    except Error as e:
        logger.info("Failed to create screenshot of page %s due to: %s", page, e)
@pytest.fixture(name="page")
def _page(context: BrowserContext) -> t.Generator[Page, None, None]:
page = context.new_page()
yield page
@pytest.fixture(scope="session")
def is_webkit(browser_name: str) -> bool:
return browser_name == "webkit"
@pytest.fixture(scope="session")
def is_firefox(browser_name: str) -> bool:
return browser_name == "firefox"
@pytest.fixture(scope="session")
def is_chromium(browser_name: str) -> bool:
return browser_name == "chromium"
@pytest.fixture(name="browser_name", scope="session")
def _browser_name(pytestconfig: t.Any) -> str:
browser_names = t.cast(list[str], pytestconfig.getoption("--browser"))
if len(browser_names) == 0:
return "chromium"
if len(browser_names) == 1:
return browser_names[0]
raise NotImplementedError("When using unittest specifying multiple browsers is not supported")
# Making test result information available in fixtures
# https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: t.Any) -> t.Generator[None, t.Any, None]:
    """Attach each phase's report to the test item.

    After this hook ran, fixtures can inspect the test outcome via
    ``item.rep_setup`` / ``item.rep_call`` / ``item.rep_teardown``.
    """
    outcome = yield  # let all other hooks run first to obtain the report
    report = outcome.get_result()
    # One attribute per phase; ``report.when`` is "setup", "call" or "teardown".
    setattr(item, "rep_" + report.when, report)
def pytest_addoption(parser: t.Any) -> None:
    """Register the Playwright-related command line options with pytest."""
    playwright_group = parser.getgroup("playwright", "Playwright")
    playwright_group.addoption(
        "--browser",
        action="append",
        default=[],
        help="Browser engine which should be used",
        choices=["chromium", "firefox", "webkit"],
    )
    playwright_group.addoption(
        "--headed",
        action="store_true",
        default=False,
        help="Run tests in headed mode.",
    )
    playwright_group.addoption(
        "--slowmo",
        default=0,
        type=int,
        help="Run tests with slow mo",
    )
    playwright_group.addoption(
        "--output",
        default="test-results",
        help="Directory for artifacts produced by tests, defaults to test-results.",
    )
    playwright_group.addoption(
        "--screenshot",
        default="off",
        choices=["on", "off", "only-on-failure"],
        help="Whether to automatically capture a screenshot after each test. "
        "If you choose only-on-failure, a screenshot of the failing page only will be created.",
    )