fix(cli): fail CLI script on failed import/export (#16976)
* Test that a failing export or import is handled properly

For each CLI entry-point we will modify, we make sure that:

- a failing process exits with a non-0 exit code,
- an error is logged.

Signed-off-by: Étienne Boisseau-Sierra <etienne.boisseau-sierra@unipart.io>

* Exit process with error if export/import failed

Bubble the exception up when an import or export fails

During a CLI import or export of dashboards, if the process fails, the
exception is caught and a simple message is sent to the logger. As a
result, from the shell's point of view the script was successful
(cf. #16956).

To prevent this, we want to ensure that the process exits with an error
(i.e., a non-zero exit code) should the export or import fail mid-flight.

Signed-off-by: Étienne Boisseau-Sierra <etienne.boisseau-sierra@unipart.io>
EBoisseauSierra committed Oct 29, 2021
1 parent 9ef9adf commit f0c0ef7
Showing 2 changed files with 168 additions and 9 deletions.
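
The fix applies one small pattern throughout superset/cli.py: catch the
exception, log the full traceback, then exit with a non-zero code so the
shell sees the failure. A minimal sketch of the idea, using a standalone
Click command and a hypothetical run_export() stand-in rather than the
actual Superset commands:

import logging
import sys

import click

logger = logging.getLogger(__name__)


def run_export() -> None:
    """Hypothetical stand-in for ExportDashboardsCommand(...).run()."""
    raise RuntimeError("export failed mid-flight")


@click.command()
def export_dashboards() -> None:
    """Toy command showing the error-handling pattern from this commit."""
    try:
        run_export()
    except Exception:  # pylint: disable=broad-except
        # Keep the traceback in the logs for debugging ...
        logger.exception("There was an error when exporting the dashboards")
        # ... and surface the failure to the shell as a non-zero exit code.
        sys.exit(1)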
27 changes: 18 additions & 9 deletions superset/cli.py

@@ -181,10 +181,10 @@ def load_examples_run(
 @click.option("--load-test-data", "-t", is_flag=True, help="Load additional test data")
 @click.option("--load-big-data", "-b", is_flag=True, help="Load additional big data")
 @click.option(
-    "--only-metadata", "-m", is_flag=True, help="Only load metadata, skip actual data"
+    "--only-metadata", "-m", is_flag=True, help="Only load metadata, skip actual data",
 )
 @click.option(
-    "--force", "-f", is_flag=True, help="Force load data even if table already exists"
+    "--force", "-f", is_flag=True, help="Force load data even if table already exists",
 )
 def load_examples(
     load_test_data: bool,
@@ -200,10 +200,10 @@ def load_examples(
 @superset.command()
 @click.argument("directory")
 @click.option(
-    "--overwrite", "-o", is_flag=True, help="Overwriting existing metadata definitions"
+    "--overwrite", "-o", is_flag=True, help="Overwriting existing metadata definitions",
 )
 @click.option(
-    "--force", "-f", is_flag=True, help="Force load data even if table already exists"
+    "--force", "-f", is_flag=True, help="Force load data even if table already exists",
 )
 def import_directory(directory: str, overwrite: bool, force: bool) -> None:
     """Imports configs from a given directory"""
@@ -296,6 +296,7 @@ def export_dashboards(dashboard_file: Optional[str] = None) -> None:
                 "There was an error when exporting the dashboards, please check "
                 "the exception traceback in the log"
             )
+            sys.exit(1)

     @superset.command()
     @with_appcontext
@@ -325,6 +326,7 @@ def export_datasources(datasource_file: Optional[str] = None) -> None:
                 "There was an error when exporting the datasets, please check "
                 "the exception traceback in the log"
             )
+            sys.exit(1)

     @superset.command()
     @with_appcontext
@@ -360,6 +362,7 @@ def import_dashboards(path: str, username: Optional[str]) -> None:
                 "There was an error when importing the dashboards(s), please check "
                 "the exception traceback in the log"
             )
+            sys.exit(1)

     @superset.command()
     @with_appcontext
@@ -387,14 +390,18 @@ def import_datasources(path: str) -> None:
                 "There was an error when importing the dataset(s), please check the "
                 "exception traceback in the log"
             )
+            sys.exit(1)


 else:

     @superset.command()
     @with_appcontext
     @click.option(
-        "--dashboard-file", "-f", default=None, help="Specify the the file to export to"
+        "--dashboard-file",
+        "-f",
+        default=None,
+        help="Specify the the file to export to",
     )
     @click.option(
         "--print_stdout",
@@ -514,6 +521,7 @@ def import_dashboards(path: str, recursive: bool, username: str) -> None:
             ImportDashboardsCommand(contents).run()
         except Exception:  # pylint: disable=broad-except
             logger.exception("Error when importing dashboard")
+            sys.exit(1)

     @superset.command()
     @with_appcontext
@@ -566,6 +574,7 @@ def import_datasources(path: str, sync: str, recursive: bool) -> None:
             ImportDatasetsCommand(contents, sync_columns, sync_metrics).run()
         except Exception:  # pylint: disable=broad-except
             logger.exception("Error when importing dataset")
+            sys.exit(1)

     @superset.command()
     @with_appcontext
@@ -609,7 +618,7 @@ def update_datasources_cache() -> None:
 @superset.command()
 @with_appcontext
 @click.option(
-    "--workers", "-w", type=int, help="Number of celery server workers to fire up"
+    "--workers", "-w", type=int, help="Number of celery server workers to fire up",
 )
 def worker(workers: int) -> None:
     """Starts a Superset worker for async SQL query execution."""
@@ -631,10 +640,10 @@ def worker(workers: int) -> None:
 @superset.command()
 @with_appcontext
 @click.option(
-    "-p", "--port", default="5555", help="Port on which to start the Flower process"
+    "-p", "--port", default="5555", help="Port on which to start the Flower process",
 )
 @click.option(
-    "-a", "--address", default="localhost", help="Address on which to run the service"
+    "-a", "--address", default="localhost", help="Address on which to run the service",
 )
 def flower(port: int, address: str) -> None:
     """Runs a Celery Flower web server
@@ -676,7 +685,7 @@ def flower(port: int, address: str) -> None:
     help="Only process dashboards",
 )
 @click.option(
-    "--charts_only", "-c", is_flag=True, default=False, help="Only process charts"
+    "--charts_only", "-c", is_flag=True, default=False, help="Only process charts",
 )
 @click.option(
     "--force",
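
With sys.exit(1) in place, a failed export or import now surfaces to the
caller as a non-zero exit status instead of a silent success. A quick way to
verify from a script, assuming a superset executable on the PATH (the exact
command name and flag here are illustrative):

import subprocess

# Invoke the CLI and inspect the exit status the shell would see.
result = subprocess.run(
    ["superset", "export-dashboards", "--dashboard-file", "dashboards.zip"],
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    # Before this fix, the command exited 0 even when the export failed
    # (see #16956); now the failure is detectable here.
    print(f"export failed with exit code {result.returncode}")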
150 changes: 150 additions & 0 deletions tests/integration_tests/cli_tests.py

@@ -17,6 +17,7 @@

 import importlib
 import json
+import logging
 from pathlib import Path
 from unittest import mock
 from zipfile import is_zipfile, ZipFile
@@ -31,6 +32,19 @@
     load_birth_names_dashboard_with_slices,
 )

+logger = logging.getLogger(__name__)
+
+
+def assert_cli_fails_properly(response, caplog):
+    """
+    Ensure that a CLI command fails according to a predefined behaviour.
+    """
+    # don't exit successfully
+    assert response.exit_code != 0
+
+    # end the logs with a record on an error
+    assert caplog.records[-1].levelname == "ERROR"
+

 @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
 def test_export_dashboards_original(app_context, fs):
@@ -107,6 +121,35 @@ def test_export_dashboards_versioned_export(app_context, fs):
     assert is_zipfile("dashboard_export_20210101T000000.zip")


+@mock.patch.dict(
+    "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
+)
+@mock.patch(
+    "superset.dashboards.commands.export.ExportDashboardsCommand.run",
+    side_effect=Exception(),
+)
+def test_failing_export_dashboards_versioned_export(
+    export_dashboards_command, app_context, fs, caplog
+):
+    """
+    Test that failing to export ZIP file is done elegantly.
+    """
+    caplog.set_level(logging.DEBUG)
+
+    # pylint: disable=reimported, redefined-outer-name
+    import superset.cli  # noqa: F811
+
+    # reload to define export_dashboards correctly based on the
+    # feature flags
+    importlib.reload(superset.cli)
+
+    runner = app.test_cli_runner()
+    with freeze_time("2021-01-01T00:00:00Z"):
+        response = runner.invoke(superset.cli.export_dashboards, ())
+
+    assert_cli_fails_properly(response, caplog)
+
+
 @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
 @mock.patch.dict(
     "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
@@ -132,6 +175,33 @@ def test_export_datasources_versioned_export(app_context, fs):
     assert is_zipfile("dataset_export_20210101T000000.zip")


+@mock.patch.dict(
+    "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
+)
+@mock.patch(
+    "superset.dashboards.commands.export.ExportDatasetsCommand.run",
+    side_effect=Exception(),
+)
+def test_failing_export_datasources_versioned_export(
+    export_dashboards_command, app_context, fs, caplog
+):
+    """
+    Test that failing to export ZIP file is done elegantly.
+    """
+    # pylint: disable=reimported, redefined-outer-name
+    import superset.cli  # noqa: F811
+
+    # reload to define export_dashboards correctly based on the
+    # feature flags
+    importlib.reload(superset.cli)
+
+    runner = app.test_cli_runner()
+    with freeze_time("2021-01-01T00:00:00Z"):
+        response = runner.invoke(superset.cli.export_datasources, ())
+
+    assert_cli_fails_properly(response, caplog)
+
+
 @mock.patch.dict(
     "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
 )
@@ -171,6 +241,46 @@ def test_import_dashboards_versioned_export(import_dashboards_command, app_conte
     import_dashboards_command.assert_called_with(expected_contents, overwrite=True)


+@mock.patch.dict(
+    "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
+)
+@mock.patch(
+    "superset.dashboards.commands.importers.dispatcher.ImportDashboardsCommand.run",
+    side_effect=Exception(),
+)
+def test_failing_import_dashboards_versioned_export(
+    import_dashboards_command, app_context, fs, caplog
+):
+    """
+    Test that failing to import either ZIP and JSON is done elegantly.
+    """
+    # pylint: disable=reimported, redefined-outer-name
+    import superset.cli  # noqa: F811
+
+    # reload to define export_dashboards correctly based on the
+    # feature flags
+    importlib.reload(superset.cli)
+
+    # write JSON file
+    with open("dashboards.json", "w") as fp:
+        fp.write('{"hello": "world"}')
+
+    runner = app.test_cli_runner()
+    response = runner.invoke(superset.cli.import_dashboards, ("-p", "dashboards.json"))
+
+    assert_cli_fails_properly(response, caplog)
+
+    # write ZIP file
+    with ZipFile("dashboards.zip", "w") as bundle:
+        with bundle.open("dashboards/dashboard.yaml", "w") as fp:
+            fp.write(b"hello: world")
+
+    runner = app.test_cli_runner()
+    response = runner.invoke(superset.cli.import_dashboards, ("-p", "dashboards.zip"))
+
+    assert_cli_fails_properly(response, caplog)
+
+
 @mock.patch.dict(
     "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
 )
@@ -208,3 +318,43 @@ def test_import_datasets_versioned_export(import_datasets_command, app_context,
     assert response.exit_code == 0
     expected_contents = {"dataset.yaml": "hello: world"}
     import_datasets_command.assert_called_with(expected_contents, overwrite=True)
+
+
+@mock.patch.dict(
+    "superset.config.DEFAULT_FEATURE_FLAGS", {"VERSIONED_EXPORT": True}, clear=True
+)
+@mock.patch(
+    "superset.datasets.commands.importers.dispatcher.ImportDatasetsCommand.run",
+    side_effect=Exception(),
+)
+def test_failing_import_datasets_versioned_export(
+    import_datasets_command, app_context, fs, caplog
+):
+    """
+    Test that failing to import either ZIP or YAML is done elegantly.
+    """
+    # pylint: disable=reimported, redefined-outer-name
+    import superset.cli  # noqa: F811
+
+    # reload to define export_datasets correctly based on the
+    # feature flags
+    importlib.reload(superset.cli)
+
+    # write YAML file
+    with open("datasets.yaml", "w") as fp:
+        fp.write("hello: world")
+
+    runner = app.test_cli_runner()
+    response = runner.invoke(superset.cli.import_datasources, ("-p", "datasets.yaml"))
+
+    assert_cli_fails_properly(response, caplog)
+
+    # write ZIP file
+    with ZipFile("datasets.zip", "w") as bundle:
+        with bundle.open("datasets/dataset.yaml", "w") as fp:
+            fp.write(b"hello: world")
+
+    runner = app.test_cli_runner()
+    response = runner.invoke(superset.cli.import_datasources, ("-p", "datasets.zip"))
+
+    assert_cli_fails_properly(response, caplog)
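
One detail these tests rely on: Click's test runner (which Flask's
app.test_cli_runner() wraps) catches the SystemExit raised by sys.exit(1)
and records it as response.exit_code instead of terminating the test
process, and that is what assert_cli_fails_properly inspects. A
self-contained illustration with a toy command (not part of the diff):

import sys

import click
from click.testing import CliRunner


@click.command()
def boom() -> None:
    """Toy command that always fails."""
    click.echo("about to fail")
    sys.exit(1)


runner = CliRunner()
result = runner.invoke(boom)
assert result.exit_code == 1  # the SystemExit is captured, not raised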