Send exec status independent of report permissions #561

Merged
cli/medperf/commands/dataset/prepare.py (31 changes: 18 additions & 13 deletions)
```diff
@@ -165,8 +165,7 @@ def setup_parameters(self):
 
     def run_prepare(self):
         report_sender = ReportSender(self)
-        if self.allow_sending_reports:
-            report_sender.start()
+        report_sender.start()
 
         prepare_params = {
             "data_path": self.raw_data_path,
@@ -190,18 +189,15 @@ def run_prepare(self):
             )
         except Exception as e:
             # Inform the server that a failure occured
-            if self.allow_sending_reports:
-                report_sender.stop("failed")
+            report_sender.stop("failed")
             raise e
-        except KeyboardInterrupt:
+        except KeyboardInterrupt as e:
             # Inform the server that the process is interrupted
-            if self.allow_sending_reports:
-                report_sender.stop("interrupted")
-            raise
+            report_sender.stop("interrupted")
+            raise e
 
         self.ui.print("> Cube execution complete")
-        if self.allow_sending_reports:
-            report_sender.stop("finished")
+        report_sender.stop("finished")
 
     def run_sanity_check(self):
         sanity_check_timeout = config.sanity_check_timeout
```
```diff
@@ -313,8 +309,12 @@ def prompt_for_report_sending_approval(self):
         dict_pretty_print(example)
 
         msg = (
-            " \nDo you approve the automatic submission of summaries similar to the one above"
-            + " to the MedPerf Server throughout the preparation process?[Y/n]"
+            "\nYou can decide whether or not to send information about your dataset preparation"
+            + "\nProgress. Keep in mind that information about the execution status of the pipeline"
+            + "\nwill be sent regardless (whether the pipeline is running, finished or failed)"
+            + "\nto identify issues with the preparation procedure. Do you approve the automatic"
+            + "\nsubmission of summaries similar to the one above to the MedPerf Server throughout"
+            + "\nthe preparation process?[Y/n]"
        )
 
        self.allow_sending_reports = approval_prompt(msg)
```
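For readability, the new concatenated string renders to the user as:

```text
You can decide whether or not to send information about your dataset preparation
Progress. Keep in mind that information about the execution status of the pipeline
will be sent regardless (whether the pipeline is running, finished or failed)
to identify issues with the preparation procedure. Do you approve the automatic
submission of summaries similar to the one above to the MedPerf Server throughout
the preparation process?[Y/n]
```

(The capitalized "Progress" mid-sentence and the missing space before "[Y/n]" are carried over verbatim from the string literals.)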
```diff
@@ -327,7 +327,12 @@ def send_report(self, report_metadata):
         return self._send_report(report_metadata)
 
     def _send_report(self, report_metadata):
-        report_status_dict = self.__generate_report_dict()
+        if self.dataset.for_test:
+            # Test datasets don't have a registration on the server
+            return
+        report_status_dict = {}
+        if self.allow_sending_reports:
+            report_status_dict = self.__generate_report_dict()
         report = {"progress": report_status_dict, **report_metadata}
         if report == self.dataset.report:
             # Watchdog may trigger an event even if contents didn't change
```
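Taken together, these hunks move the permission check out of `run_prepare` and into `_send_report`: the ReportSender lifecycle, and with it the execution status, is now unconditional, and only the detailed report contents are gated. A condensed sketch of the resulting flow, paraphrased from the diff above (elisions are mine, names match the PR):

```python
def run_prepare(self):
    report_sender = ReportSender(self)
    report_sender.start()  # always started: execution status is sent regardless
    try:
        ...  # run the preparation cube
    except Exception as e:
        report_sender.stop("failed")  # status update needs no report permission
        raise e
    except KeyboardInterrupt as e:
        report_sender.stop("interrupted")
        raise e
    report_sender.stop("finished")

def _send_report(self, report_metadata):
    if self.dataset.for_test:
        return  # test datasets have no server registration to update
    report_status_dict = {}
    if self.allow_sending_reports:  # detailed progress only with user consent
        report_status_dict = self.__generate_report_dict()
    report = {"progress": report_status_dict, **report_metadata}
    ...
```

Note also that the bare `raise` became `raise e`: both re-raise the caught KeyboardInterrupt, the bound name just keeps the two handlers symmetric.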
cli/medperf/tests/commands/dataset/test_prepare.py (64 changes: 38 additions & 26 deletions)
```diff
@@ -8,7 +8,7 @@
 
 from medperf.tests.mocks.dataset import TestDataset
 from medperf.tests.mocks.cube import TestCube
-from medperf.commands.dataset.prepare import DataPreparation
+from medperf.commands.dataset.prepare import DataPreparation, Observer
 
 PATCH_REGISTER = "medperf.commands.dataset.prepare.{}"
 
```
```diff
@@ -54,37 +54,58 @@ def test_get_prep_cube_downloads_cube_file(mocker, data_preparation, cube):
     spy.assert_called_once()
 
 
+@pytest.mark.parametrize("dataset_for_test", [False, True])
+def test_prepare_with_test_data_doesnt_send_reports(
+    mocker, data_preparation, dataset_for_test, cube, comms, fs
+):
+    # Arrange
+    data_preparation.dataset.for_test = dataset_for_test
+    mocker.patch.object(cube, "run")
+    mocker.patch.object(data_preparation.dataset, "write")
+    mocked_obs = mocker.create_autospec(spec=Observer)
+    mocker.patch(PATCH_REGISTER.format("Observer"), side_effect=mocked_obs)
+    send_report_spy = mocker.patch.object(comms, "update_dataset")
+
+    # Act
+    data_preparation.run_prepare()
+
+    # Assert
+    if dataset_for_test:
+        send_report_spy.assert_not_called()
+    else:
+        send_report_spy.assert_called()
+
+
 @pytest.mark.parametrize("allow_sending_reports", [False, True])
 def test_prepare_runs_then_stops_report_handler(
-    mocker, data_preparation, allow_sending_reports, cube
+    mocker, data_preparation, allow_sending_reports, cube, comms, fs
 ):
     # Arrange
     data_preparation.allow_sending_reports = allow_sending_reports
     mocker.patch.object(cube, "run")
     start_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.start"))
     stop_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.stop"))
+    mocker.patch.object(data_preparation.dataset, "write")
+    mocked_obs = mocker.create_autospec(spec=Observer)
+    mocker.patch(PATCH_REGISTER.format("Observer"), side_effect=mocked_obs)
+    gen_report_spy = mocker.patch(PATCH_REGISTER.format("DataPreparation._DataPreparation__generate_report_dict"))
 
     # Act
     data_preparation.run_prepare()
 
     # Assert
     if allow_sending_reports:
         start_spy.assert_called_once()
         stop_spy.assert_called_once_with("finished")
+        gen_report_spy.assert_called()
     else:
-        start_spy.assert_not_called()
-        stop_spy.assert_not_called()
+        gen_report_spy.assert_not_called()
 
 
-@pytest.mark.parametrize("allow_sending_reports", [False, True])
 def test_prepare_runs_then_stops_report_handler_on_failure(
-    mocker, data_preparation, allow_sending_reports, cube
+    mocker, data_preparation, cube
 ):
     # Arrange
     def _failure_run(*args, **kwargs):
         raise Exception()
 
-    data_preparation.allow_sending_reports = allow_sending_reports
+    data_preparation.allow_sending_reports = True
     mocker.patch.object(cube, "run", side_effect=_failure_run)
     start_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.start"))
     stop_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.stop"))
```
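Both new arrange blocks stub out the watchdog `Observer` with `mocker.create_autospec`. Passing the autospec as `side_effect` means that when the code under test instantiates `Observer(...)`, the call is routed through the autospec and returns a spec'd instance mock, so no real filesystem-watching thread is started. A minimal sketch of the pattern with plain `unittest.mock` (module-local stand-in class, not the MedPerf fixture):

```python
from unittest import mock

class Observer:  # stand-in for watchdog.observers.Observer
    def start(self): ...
    def stop(self): ...

mocked_obs = mock.create_autospec(spec=Observer)

with mock.patch(f"{__name__}.Observer", side_effect=mocked_obs):
    obs = Observer()  # returns mocked_obs.return_value, an autospec'd instance
    obs.start()       # recorded on the mock; no real thread is spawned
    mocked_obs.assert_called_once()
```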
```diff
@@ -94,23 +115,18 @@ def _failure_run(*args, **kwargs):
     data_preparation.run_prepare()
 
     # Assert
-    if allow_sending_reports:
-        start_spy.assert_called_once()
-        stop_spy.assert_called_once_with("failed")
-    else:
-        start_spy.assert_not_called()
-        stop_spy.assert_not_called()
+    start_spy.assert_called_once()
+    stop_spy.assert_called_once_with("failed")
 
 
-@pytest.mark.parametrize("allow_sending_reports", [False, True])
 def test_prepare_runs_then_stops_report_handler_on_interrupt(
-    mocker, data_preparation, allow_sending_reports, cube
+    mocker, data_preparation, cube
 ):
     # Arrange
     def _failure_run(*args, **kwargs):
         raise KeyboardInterrupt()
 
-    data_preparation.allow_sending_reports = allow_sending_reports
+    data_preparation.allow_sending_reports = True
     mocker.patch.object(cube, "run", side_effect=_failure_run)
     start_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.start"))
     stop_spy = mocker.patch(PATCH_REGISTER.format("ReportSender.stop"))
```
```diff
@@ -120,12 +136,8 @@
     data_preparation.run_prepare()
 
     # Assert
-    if allow_sending_reports:
-        start_spy.assert_called_once()
-        stop_spy.assert_called_once_with("interrupted")
-    else:
-        start_spy.assert_not_called()
-        stop_spy.assert_not_called()
+    start_spy.assert_called_once()
+    stop_spy.assert_called_once_with("interrupted")
 
 
 @pytest.mark.parametrize("report_specified", [False, True])
```
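The patch target `DataPreparation._DataPreparation__generate_report_dict` looks odd at first glance: because `__generate_report_dict` has a double-underscore prefix, Python name-mangles it, and the mangled name is what `mocker.patch` must target. A standalone illustration (hypothetical class, not MedPerf code):

```python
class DataPreparation:
    def __generate_report_dict(self):  # mangled to _DataPreparation__generate_report_dict
        return {"status": "running"}

prep = DataPreparation()
# The attribute is stored under the mangled name:
print(prep._DataPreparation__generate_report_dict())  # {'status': 'running'}
# Accessing the unmangled name from outside the class fails:
# prep.__generate_report_dict()  -> AttributeError
```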