Simplify timeouts in dash.testing functions
We are experiencing flaky test timeouts when running tests on Azure.
We have been addressing this by increasing timeouts in the affected
calls, but that approach seems unsustainable. This commit instead lets
us change the implicit and explicit waits (see the Selenium and
dash.testing docs) for all tests at once. Connected to equinor#428.

It also adds optional arguments to pytest for changing these timeouts
at runtime, and lets testkomodo.sh detect the environment (Azure vs
on-prem) from an environment variable.

It also removes a hacky `wait_a_bit` function, replacing it in one
case with an undocumented wait_for_no_elements call from dash.testing.
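
For background, the two kinds of wait differ in scope. A minimal sketch
of both, written against dash.testing's Selenium driver (the test name,
the `.dash-graph` selector, and the timeout values are illustrative, not
part of this commit):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait


def test_wait_styles(dash_duo):
    # Implicit wait: set once on the driver; every subsequent
    # find_element(s) call polls the DOM for up to this many seconds.
    dash_duo.driver.implicitly_wait(4)

    # Explicit wait: scoped to a single condition, polled until it is
    # truthy or the timeout expires; raises TimeoutException otherwise.
    # dash_duo's wait_for_* helpers are built on this mechanism and use
    # dash_duo.wait_timeout (settable in dash>=2.9, as the diff below
    # notes).
    WebDriverWait(dash_duo.driver, 20).until(
        lambda d: len(d.find_elements(By.CSS_SELECTOR, ".dash-graph")) > 0
    )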
kwinkunks committed Apr 14, 2023
1 parent badb921 commit 16ffdaa
Showing 8 changed files with 75 additions and 53 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python.yml
@@ -25,7 +25,7 @@ jobs:
          mypy --config-file mypy.ini --package webviz_ert
      - name: Test with pytest
        run: |
          pytest
          pytest --implicit-timeout=4 --explicit-timeout=20
      - name: Run black
        uses: psf/black@stable
20 changes: 17 additions & 3 deletions ci/jenkins/testkomodo.sh
@@ -10,15 +10,22 @@ install_package () {
start_tests () {
    start_integration_test
    test_result=$?
    if [ "$test_result" -gt 0 ];
    if [ "$test_result" -gt 0 ]
    then
        exit $test_result
    fi
    pytest -vs -m "not spe1"
    if [ "$CI_RUNNER_LABEL" == "azure" ]
    then
        # Double the timeouts on Azure.
        pytest -vs -m "not spe1" --implicit-timeout=4 --explicit-timeout=20
    else
        pytest -vs -m "not spe1"
    fi
}

start_integration_test () {

    # Dynamically set up the chromedriver according to the installed version.
    chromium_version=$(chromium-browser --version | grep -zoP '\d+\.\d+\.\d+')
    chromedriver_version=$(curl -s https://chromedriver.storage.googleapis.com/LATEST_RELEASE_$chromium_version)
    echo "Downloading chromedriver v$chromedriver_version for chromium-browser v$chromium_version"
@@ -38,7 +45,14 @@ start_integration_test () {
    unset HOST

    echo "Test for webviz-ert plugins..."
    pytest ../../../ -vs -m spe1

    if [ "$CI_RUNNER_LABEL" == "azure" ]
    then
        # Double the timeouts on Azure.
        pytest ../../../ -vs -m spe1 --implicit-timeout=4 --explicit-timeout=20
    else
        pytest ../../../ -vs -m spe1
    fi

    popd
}
67 changes: 51 additions & 16 deletions tests/conftest.py
@@ -2,12 +2,46 @@
from requests import HTTPError
import dash
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException


from tests.data.snake_oil_data import ensembles_response


def pytest_addoption(parser):
    """
    Add command-line options to pytest so we can set timeout limits,
    e.g. according to the platform the tests run on (Azure or on-prem).
    These options are used by setup_plugin().
    """
    parser.addoption(
        "--implicit-timeout",
        action="store",
        type=int,
        default=2,
        help="implicit timeout in seconds; default 2 s",
    )
    parser.addoption(
        "--explicit-timeout",
        action="store",
        type=int,
        default=10,
        help="explicit timeout in seconds; default 10 s",
    )


# We want to access the options in conftest.py, not in tests;
# this is one way to make the options available as variables.
IMPLICIT_TIMEOUT, EXPLICIT_TIMEOUT = None, None


def pytest_configure(config):
    global IMPLICIT_TIMEOUT, EXPLICIT_TIMEOUT
    IMPLICIT_TIMEOUT = config.getoption("--implicit-timeout")
    EXPLICIT_TIMEOUT = config.getoption("--explicit-timeout")


def pytest_setup_options():
    options = Options()
    options.add_argument("--headless")
@@ -80,8 +114,6 @@ def select_first(dash_duo, selector):
        raise AssertionError(f"No selection option for selector {selector}")
    text = options[0].text
    options[0].click()
    # TODO remove wait?
    wait_a_bit(dash_duo, time_seconds=0.5)
    return text


@@ -93,8 +125,6 @@ def select_by_name(dash_duo, selector, name):
    for option in options:
        if option.text == name:
            option.click()
            # TODO remove wait?
            wait_a_bit(dash_duo, time_seconds=0.5)
            return name
    raise AssertionError(f"Option {name} not available in {selector}")

@@ -116,8 +146,22 @@ def setup_plugin(
    plugin = plugin_class(app, project_identifier=project_identifier, beta=beta)
    app.layout = plugin.layout
    dash_duo.start_server(app)
    windowsize = window_size
    dash_duo.driver.set_window_size(*windowsize)
    dash_duo.driver.set_window_size(*window_size)

    # Change the timeout periods to help fix flaky tests.
    # These are changed with the command-line options
    # --implicit-timeout and --explicit-timeout. For example, run
    #     pytest --explicit-timeout=20
    # to double the explicit timeout on the wait_for_* calls.
    # Read more about waits in Selenium:
    # https://selenium-python.readthedocs.io/waits.html
    #
    # Implicit waits:
    dash_duo.driver.implicitly_wait(IMPLICIT_TIMEOUT)

    # Explicit waits:
    dash_duo.wait_timeout = EXPLICIT_TIMEOUT  # Requires dash>=2.9.0

    return plugin


@@ -131,7 +175,6 @@ def select_ensemble(dash_duo, plugin, wanted_ensemble_name=None):
    dash_duo.wait_for_contains_text(
        "#" + plugin.uuid("selected-ensemble-dropdown"),
        first_ensemble_name,
        timeout=4,
    )
    return first_ensemble_name

@@ -140,7 +183,6 @@ def select_ensemble(dash_duo, plugin, wanted_ensemble_name=None):
    dash_duo.wait_for_contains_text(
        "#" + plugin.uuid("selected-ensemble-dropdown"),
        wanted_ensemble_name,
        timeout=4,
    )
    return wanted_ensemble_name

@@ -175,13 +217,6 @@ def select_parameter(dash_duo, plugin, parameter_name=None, wait_for_plot=True)
    return parameter_name


def wait_a_bit(dash_duo, time_seconds=0.1):
    try:
        dash_duo.wait_for_element(".foo-elderberries-baarrrrr", timeout=time_seconds)
    except TimeoutException:
        pass


def verify_key_in_dropdown(dash_duo, selector, key):
    verify_keys_in_dropdown(dash_duo, selector, [key])

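As a design note, the module-level globals above work because
setup_plugin is a plain helper rather than a fixture. Where the options
are only needed inside tests or fixtures, the more idiomatic pytest
route is request.config; a minimal sketch (the `timeouts` fixture name
is hypothetical, not part of this commit):

import pytest


@pytest.fixture
def timeouts(request):
    # request.config exposes the options parsed by pytest_addoption.
    return (
        request.config.getoption("--implicit-timeout"),
        request.config.getoption("--explicit-timeout"),
    )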
2 changes: 0 additions & 2 deletions tests/views/test_general_stuff.py
@@ -76,8 +76,6 @@ def test_selectors_visibility_toggle_button(plugin_class, skip, mock_data, dash_
        f"{variable_container_class_prefix}-hide",
    )

    # assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_response_selector_sorting(mock_data, dash_duo):
    plugin = setup_plugin(dash_duo, __name__, ResponseComparison)
4 changes: 0 additions & 4 deletions tests/views/test_observation_view.py
@@ -26,8 +26,6 @@ def test_observation_analyzer_view_ensemble_no_observations(
    response_options_selector = f"#{plugin.uuid('response-selector')}"
    dash_duo.wait_for_text_to_equal(response_options_selector, "Select...")

    # assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_observation_analyzer_view_ensemble_with_observations(
    mock_data,
@@ -46,5 +44,3 @@ def test_observation_analyzer_view_ensemble_with_observations(
    )

    verify_key_in_dropdown(dash_duo, plugin.uuid("response-selector"), "FOPR")

    # assert dash_duo.get_logs() == [], "browser console should contain no error"
3 changes: 0 additions & 3 deletions tests/views/test_parameter_selector.py
@@ -54,7 +54,6 @@ def test_parameter_selector(
    parameter_selector_container = dash_duo.wait_for_element_by_css_selector(
        ".ert-parameter-selector-container-hide"
    )
    # assert dash_duo.get_logs() == []


def test_search_input_return_functionality(
@@ -125,8 +124,6 @@ def test_search_input_return_functionality(
        "×OP1_DIVERGENCE_SCALE",
    )

    # assert dash_duo.get_logs() == []


def test_parameter_selector_sorting(
    mock_data,
18 changes: 3 additions & 15 deletions tests/views/test_plot_view.py
@@ -4,7 +4,6 @@
    select_ensemble,
    select_parameter,
    select_response,
    wait_a_bit,
)


@@ -23,8 +22,6 @@ def test_plot_view(

    select_parameter(dash_duo, plugin, "BPR_138_PERSISTENCE")

    # assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_clearing_parameters_view(
    mock_data,
@@ -57,8 +54,6 @@ def test_clearing_parameters_view(
    # verify only expected response plot is left in place
    dash_duo.find_element(f".dash-graph[id*={response_name}]")

    # assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_clearing_ensembles_view(
    mock_data,
@@ -86,12 +81,9 @@
    )
    clear_all.click()

    # wait a bit for the page to update
    wait_a_bit(dash_duo)

    # verify all plots are gone
    plots = dash_duo.find_elements(".dash-graph")
    assert len(plots) == 0
    # Verify that the elements have been removed, using an
    # undocumented dash.testing feature (returns None).
    _ = dash_duo.wait_for_no_elements(".dash-graph")

    # verify no responses are selected
    chosen_responses = dash_duo.find_elements(
@@ -105,8 +97,6 @@
    )
    assert len(chosen_parameters) == 0

    # assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_axis_labels(mock_data, dash_duo):
    """test_axis_labels loads two different plots in the plot view and checks
@@ -137,5 +127,3 @@

    index_plot_id = plugin.uuid("FGPT")
    dash_duo.wait_for_text_to_equal(f"#{index_plot_id} text.xtitle", "Index")

    # assert dash_duo.get_logs() == [], "browser console should contain no error"
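
The wait_for_no_elements call in test_clearing_ensembles_view above is,
as the commit message notes, undocumented in dash.testing. Should it
change or disappear, an equivalent explicit wait can be built from
plain Selenium; a sketch assuming the Selenium 4 API (the helper name,
selector, and timeout are illustrative):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait


def wait_for_no_matches(dash_duo, selector, timeout=10):
    # Poll until nothing matches the selector; raises TimeoutException
    # at the deadline. Note that a non-zero implicit wait makes each
    # find_elements poll block while no match exists, so the total wall
    # time can exceed `timeout`.
    WebDriverWait(dash_duo.driver, timeout).until(
        lambda d: len(d.find_elements(By.CSS_SELECTOR, selector)) == 0
    )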
12 changes: 3 additions & 9 deletions tests/views/test_response_correlation.py
@@ -30,9 +30,8 @@ def test_axes_labels(mock_data, dash_duo):

    plot_id = plugin.uuid("response-overview")

    # check that y axis label spells out "Value"; test is flaky so longer
    # timeout.
    dash_duo.wait_for_text_to_equal(f"#{plot_id} text.ytitle", "Value", timeout=20)
    # check that y axis label spells out "Value".
    dash_duo.wait_for_text_to_equal(f"#{plot_id} text.ytitle", "Value")

    # check that one has date, the other has index as x axis label
    if wanted_response == "FOPR":
@@ -50,8 +49,6 @@
    # wait for deselected option to reappear among available options
    dash_duo.wait_for_contains_text(f"#{response_selector_id}", wanted_response)

    # assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_show_respo_with_obs(mock_data, dash_duo):
    """Test response observation filter works as expected"""
@@ -66,7 +63,6 @@
    dash_duo.wait_for_text_to_equal(
        response_selector_id,
        "\n".join(expected_responses_with_observations),
        timeout=20,
    )


@@ -86,6 +82,4 @@ def test_info_text_appears_as_expected(
    expected_text = "".join(
        [f"RESPONSE: {response}", f"INDEX: {index}", f"PARAMETER: {parameter}"]
    )
    dash_duo.wait_for_text_to_equal(info_text_selector, expected_text, timeout=20)

    # assert dash_duo.get_logs() == [], "browser console should contain no error"
    dash_duo.wait_for_text_to_equal(info_text_selector, expected_text)
