diff --git a/docs-requirements.txt b/docs-requirements.txt index 4e8a1003d7..b2814dc37e 100644 --- a/docs-requirements.txt +++ b/docs-requirements.txt @@ -1,5 +1,6 @@ docutils>=0.15.2,<0.17 pydata-sphinx-theme>=0.3.1 +astroid<=2.6.6 Sphinx>=2.0.1,<4.0.0 nbconvert>=5.5.0 nbsphinx>=0.8.5 diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst index cde7647c97..4ab80cac75 100644 --- a/docs/source/release_notes.rst +++ b/docs/source/release_notes.rst @@ -13,6 +13,7 @@ Release Notes * Updated ``get_best_sampler_for_data`` to consider all non-numeric datatypes as categorical for SMOTE :pr:`2590` * Fixed inconsistent test results from `TargetDistributionDataCheck` :pr:`2608` * Adopted vectorized pd.NA checking for Woodwork 0.5.1 support :pr:`2626` + * Pinned upper version of astroid to 2.6.6 to keep ReadTheDocs working :pr:`2638` * Changes * Renamed SMOTE samplers to SMOTE oversampler :pr:`2595` * Changed ``partial_dependence`` and ``graph_partial_dependence`` to raise a ``PartialDependenceError`` instead of ``ValueError``. 
This is not a breaking change because ``PartialDependenceError`` is a subclass of ``ValueError`` :pr:`2604` @@ -20,6 +21,7 @@ Release Notes * Documentation Changes * To avoid local docs build error, only add warning disable and download headers on ReadTheDocs builds, not locally :pr:`2617` * Testing Changes + * Updated partial_dependence tests to change the element-wise comparison per the Plotly 5.2.1 upgrade :pr:`2638` * Changed the lint CI job to only check against python 3.9 via the `-t` flag :pr:`2586` * Installed Prophet in linux nightlies test and fixed ``test_all_components`` :pr:`2598` * Refactored and fixed all ``make_pipeline`` tests to assert correct order and address new Woodwork Unknown type inference :pr:`2572` diff --git a/evalml/tests/model_understanding_tests/test_partial_dependence.py b/evalml/tests/model_understanding_tests/test_partial_dependence.py index 17c19ae150..d057b496e1 100644 --- a/evalml/tests/model_understanding_tests/test_partial_dependence.py +++ b/evalml/tests/model_understanding_tests/test_partial_dependence.py @@ -809,7 +809,7 @@ def test_graph_partial_dependence_regression_and_binary_categorical( ) plot_data = fig.to_dict()["data"][0] assert plot_data["type"] == "bar" - assert plot_data["x"] == ["0", "1", "2"] + assert list(plot_data["x"]) == ["0", "1", "2"] fig = graph_partial_dependence( pipeline, X, features=("0", "categorical_column"), grid_resolution=5 @@ -875,7 +875,7 @@ def test_partial_dependence_multiclass_categorical( for i, plot_data in enumerate(fig.to_dict()["data"]): assert plot_data["type"] == "bar" - assert plot_data["x"] == ["0", "1", "2"] + assert list(plot_data["x"]) == ["0", "1", "2"] if class_label is None: assert plot_data["name"] == f"class_{i}" else: @@ -1092,7 +1092,7 @@ def test_graph_partial_dependence_regression_and_binary_datetime( fig = graph_partial_dependence(pipeline, X, features="dt_column", grid_resolution=5) plot_data = fig.to_dict()["data"][0] assert plot_data["type"] == "scatter" - assert 
plot_data["x"] == list(pd.date_range("20200101", periods=5)) + assert list(plot_data["x"]) == list(pd.date_range("20200101", periods=5)) def test_graph_partial_dependence_regression_date_order(X_y_binary): @@ -1126,7 +1126,7 @@ def test_graph_partial_dependence_regression_date_order(X_y_binary): fig = graph_partial_dependence(pipeline, X, features="dt_column", grid_resolution=5) plot_data = fig.to_dict()["data"][0] assert plot_data["type"] == "scatter" - assert plot_data["x"] == list(pd.date_range("20200101", periods=5)) + assert list(plot_data["x"]) == list(pd.date_range("20200101", periods=5)) def test_partial_dependence_respect_grid_resolution(fraud_100):