diff --git a/darwin/cli.py b/darwin/cli.py index b1657154a..f72ed9fd8 100644 --- a/darwin/cli.py +++ b/darwin/cli.py @@ -6,6 +6,7 @@ from pathlib import Path import requests.exceptions +from darwin.datatypes import AnnotatorReportGrouping from rich.console import Console import darwin.cli_functions as f @@ -220,6 +221,19 @@ def _run(args: Namespace, parser: ArgumentParser) -> None: output=args.output, ) + # Report generation commands + elif args.command == "report": + if args.action == "annotators": + f.report_annotators( + args.datasets, + args.start, + args.stop, + [AnnotatorReportGrouping(value) for value in args.group_by], + args.pretty, + ) + elif args.action == "help" or args.action is None: + f.help(parser, "report") + if __name__ == "__main__": main() diff --git a/darwin/cli_functions.py b/darwin/cli_functions.py index 74d77e550..5c17b46bc 100644 --- a/darwin/cli_functions.py +++ b/darwin/cli_functions.py @@ -35,6 +35,7 @@ from darwin.dataset.upload_manager import LocalFile from darwin.dataset.utils import get_release_path from darwin.datatypes import ( + AnnotatorReportGrouping, ExportParser, ImportParser, NumberLike, @@ -1348,6 +1349,90 @@ def post_comment( console.print(f"[red]{traceback.format_exc()}") +def report_annotators( + dataset_slugs: list[str], + start: datetime.datetime, + stop: datetime.datetime, + group_by: list[AnnotatorReportGrouping], + pretty: bool, +) -> None: + """ + Prints an annotators report in CSV format. + + Parameters + ---------- + dataset_slugs : list[str] + Slugs of datasets to include in the report. + start : datetime.datetime + Timezone aware report start DateTime. + stop : datetime.datetime + Timezone aware report end DateTime. + group_by: list[AnnotatorReportGrouping] + Non-empty list of grouping options for the report. + pretty : bool + If ``True``, it will print the output in a Rich formatted table. 
+ """ + client: Client = _load_client() + console = Console(theme=_console_theme()) + + dataset_ids = [] + for dataset in client.list_remote_datasets(): + if dataset.slug in dataset_slugs: + dataset_ids.append(dataset.dataset_id) + dataset_slugs.remove(dataset.slug) + + if dataset_slugs: + _error(f"Datasets '{dataset_slugs}' do not exist.") + + report: str = client.get_annotators_report( + dataset_ids, + start, + stop, + group_by, + ).text + + # the API does not return CSV headers if the report is empty + if not report: + report = "timestamp,dataset_id,dataset_name,dataset_slug,workflow_id,workflow_name,current_stage_id,current_stage_name,actor_id,actor_type,actor_email,actor_full_name,active_time,total_annotations,review_pass_rate,total_items_annotated,time_per_annotation,time_per_item\n" + + if not pretty: + print(report) + return + + lines: List[str] = report.split("\n") + lines.pop(0) # remove csv headers + lines.pop() # remove last line, which is empty + + table: Table = Table(show_header=True, header_style="bold cyan") + + table.add_column("Date") + for header in [ + "Dataset Id", + "Dataset Name", + "Dataset Slug", + "Workflow Id", + "Workflow Name", + "Current Stage Id", + "Current Stage Name", + "User Id", + "User Type", + "Email", + "Full Name", + "Active Time", + "Total Annotations", + "Review Pass Rate", + "Total Items Annotated", + "Time Per Annotation", + "Time Per Item", + ]: + table.add_column(header, justify="right") + + for row in lines: + table.add_row(*row.split(",")) + + console.print(table) + + def help(parser: argparse.ArgumentParser, subparser: Optional[str] = None) -> None: """ Prints the help text for the given command. 
diff --git a/darwin/client.py b/darwin/client.py index 0b847d810..19f8ec9fd 100644 --- a/darwin/client.py +++ b/darwin/client.py @@ -2,23 +2,36 @@ import logging import os import zlib +from datetime import datetime from logging import Logger from pathlib import Path from typing import Dict, Iterator, List, Optional, Union, cast -from requests.exceptions import HTTPError + import requests from requests import Response from requests.adapters import HTTPAdapter +from requests.exceptions import HTTPError +from tenacity import ( + RetryCallState, + retry, + retry_if_exception_type, + stop_after_attempt, + stop_after_delay, + wait_exponential_jitter, +) +from tenacity.wait import wait_exponential + from darwin.backend_v2 import BackendV2 from darwin.config import Config from darwin.dataset.identifier import DatasetIdentifier from darwin.dataset.remote_dataset import RemoteDataset from darwin.dataset.remote_dataset_v2 import RemoteDatasetV2 from darwin.datatypes import ( + AnnotatorReportGrouping, DarwinVersionNumber, Feature, ObjectStore, + ReportJob, Team, UnknownType, ) @@ -55,6 +68,12 @@ MAX_WAIT = int(os.getenv("DARWIN_RETRY_MAX_WAIT", "300")) MAX_RETRIES = int(os.getenv("DARWIN_RETRY_MAX_ATTEMPTS", "10")) +HOUR = 60 * 60 + + +class JobPendingException(Exception): + """Raised when a requested job is not finished or failed.""" + def log_rate_limit_exceeded(retry_state: RetryCallState): wait_time = retry_state.next_action.sleep @@ -633,6 +652,105 @@ def get_report( the_team_slug, ) + def get_annotators_report( + self, + dataset_ids: list[int], + start: datetime, + stop: datetime, + group_by: list[AnnotatorReportGrouping], + team_slug: Optional[str] = None, + ) -> Response: + """ + Gets the annotators report for the given datasets. + + Parameters + ---------- + dataset_ids: list[int] + Ids of the datasets to include in the report. 
+ start : datetime.datetime + Timezone aware report start DateTime + stop : datetime.datetime + Timezone aware report end DateTime + group_by: list[AnnotatorReportGrouping] + Non-empty list of grouping options for the report. + team_slug: Optional[str] + Team slug of the team the dataset will belong to. Defaults to None. + + Returns + ------ + Response + The raw response of the report (CSV format) or None if the Team was not found. + + Raises + ------ + ValueError + If no team was found. If start or stop parameters are not timezone aware. If no group_by options provided. + """ + if start.utcoffset() is None or stop.utcoffset() is None: + raise ValueError( + "start and stop parameters must be timezone aware (e.g. 2024-11-04T00:00:00Z)" + ) + + if not group_by: + raise ValueError( + f"At least one group_by option is required, any of: {[option.value for option in AnnotatorReportGrouping]}" + ) + + the_team: Optional[Team] = self.config.get_team(team_slug or self.default_team) + + if not the_team: + raise ValueError("No team was found.") + + the_team_slug: str = the_team.slug + + response_data = self._post( + f"/v3/teams/{the_team_slug}/reports/annotator/jobs", + { + "start": start.isoformat(timespec="seconds"), + "stop": stop.isoformat(timespec="seconds"), + "dataset_ids": dataset_ids, + "group_by": [option.value for option in group_by], + "format": "csv", + "metrics": [ + "active_time", + "total_annotations", + "total_items_annotated", + "time_per_item", + "time_per_annotation", + "review_pass_rate", + ], + }, + the_team_slug, + ) + report_job = ReportJob.model_validate(response_data) + + finished_report_job = self.poll_pending_report_job(the_team_slug, report_job.id) + assert isinstance(finished_report_job.url, str) + + return self._get_raw_from_full_url(finished_report_job.url, the_team_slug) + + @retry( + reraise=True, + wait=wait_exponential(max=MAX_WAIT), + stop=stop_after_delay(2 * HOUR), + retry=retry_if_exception_type(JobPendingException), + ) + def 
poll_pending_report_job(self, team_slug: str, job_id: str) -> ReportJob: + job_status_url = f"/v3/teams/{team_slug}/reports/annotator/jobs/{job_id}" + + response_data = self._get(job_status_url, team_slug) + report_job = ReportJob.model_validate(response_data) + + if report_job.status == "failed": + raise ValueError("Building an annotator report failed, try again later.") + + if report_job.status != "finished": + raise JobPendingException( + f"Polling for generated report results timed out, job status can be requested manually: {urljoin(self.url, job_status_url)}" + ) + + return report_job + def fetch_binary(self, url: str) -> Response: """ Fetches binary data from the given url via a stream. diff --git a/darwin/datatypes.py b/darwin/datatypes.py index 189a6ce94..c1b1046a0 100644 --- a/darwin/datatypes.py +++ b/darwin/datatypes.py @@ -1567,3 +1567,17 @@ class StorageKeyDictModel(BaseModel): class StorageKeyListModel(BaseModel): storage_keys: List[str] + + +class ReportJob(BaseModel): + id: str + status: str + format: str + url: str | None + team_id: int + + +class AnnotatorReportGrouping(str, Enum): + ANNOTATORS = "annotators" + DATASETS = "datasets" + STAGES = "stages" diff --git a/darwin/options.py b/darwin/options.py index 90b79f6e2..f3eb9aa2a 100644 --- a/darwin/options.py +++ b/darwin/options.py @@ -1,8 +1,10 @@ import sys from argparse import ArgumentParser, Namespace +from datetime import datetime from typing import Any, Optional, Tuple import argcomplete +from darwin.datatypes import AnnotatorReportGrouping class Options: @@ -543,6 +545,50 @@ def cpu_default_types(input: Any) -> Optional[int]: # type: ignore # Help dataset_action.add_parser("help", help="Show this help message and exit.") + # REPORT + report = subparsers.add_parser( + "report", + help="Report related functions.", + description="Arguments to interact with reports", + ) + report_action = report.add_subparsers(dest="action") + + # Annotators + parser_annotators = report_action.add_parser( + 
 "annotators", help="Report about the annotators." + ) + parser_annotators.add_argument( + "--datasets", + default=[], + type=lambda csv: [value.strip() for value in csv.split(",")], + help="List of comma-separated dataset slugs to include in the report.", + ) + parser_annotators.add_argument( + "--start", + required=True, + type=lambda dt: datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S%z"), + help="Report start DateTime in RFC3339 format (e.g. 2020-01-20T14:00:00Z).", + ) + parser_annotators.add_argument( + "--stop", + required=True, + type=lambda dt: datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S%z"), + help="Report end DateTime in RFC3339 format (e.g. 2020-01-20T15:00:00Z).", + ) + parser_annotators.add_argument( + "--group-by", + required=True, + type=lambda csv: [value.strip() for value in csv.split(",")], + help=f"Non-empty list of comma-separated grouping options for the report, any of: {[name.value for name in AnnotatorReportGrouping]}.", + ) + parser_annotators.add_argument( + "-r", + "--pretty", + action="store_true", + default=False, + help="Prints the results formatted in a rich table.", + ) + # VERSION subparsers.add_parser( + "version", help="Check current version of the repository. 
" diff --git a/tests/darwin/cli_functions_test.py b/tests/darwin/cli_functions_test.py index 6f783b78e..ee13582a6 100644 --- a/tests/darwin/cli_functions_test.py +++ b/tests/darwin/cli_functions_test.py @@ -1,11 +1,14 @@ import builtins import sys +from datetime import datetime, timedelta, timezone from unittest.mock import call, patch +from darwin.datatypes import AnnotatorReportGrouping import pytest import responses from rich.console import Console +from darwin import cli from darwin.cli_functions import ( delete_files, extract_video_artifacts, @@ -16,6 +19,7 @@ from darwin.config import Config from darwin.dataset import RemoteDataset from darwin.dataset.remote_dataset_v2 import RemoteDatasetV2 +from darwin.options import Options from darwin.utils import BLOCKED_UPLOAD_ERROR_ALREADY_EXISTS from tests.fixtures import * @@ -448,3 +452,104 @@ def test_extract_video(self, tmp_path): repair=False, storage_key_prefix="test/prefix", ) + + +class TestReportAnnotators: + def test_parses_datetimes_and_comma_separated_lists( + self, remote_dataset: RemoteDataset + ): + start_date = datetime(2024, 11, 4, tzinfo=timezone.utc) + stop_date = datetime(2025, 5, 1, tzinfo=timezone(timedelta(hours=2))) + group_by = [ + AnnotatorReportGrouping.STAGES, + AnnotatorReportGrouping.ANNOTATORS, + AnnotatorReportGrouping.DATASETS, + ] + dataset_id = remote_dataset.dataset_id + test_args = [ + "darwin", + "report", + "annotators", + "--start", + "2024-11-04T00:00:00Z", + "--stop", + "2025-05-01T00:00:00+02:00", + "--group-by", + " stages, annotators,datasets", + "--datasets", + remote_dataset.slug, + ] + + with ( + patch.object(sys, "argv", test_args), + patch.object(Client, "list_remote_datasets", return_value=[remote_dataset]), + patch.object(Client, "get_annotators_report") as get_report_mock, + ): + args, parser = Options().parse_args() + cli._run(args, parser) + + get_report_mock.assert_called_once_with( + [dataset_id], + start_date, + stop_date, + group_by, + ) + + def 
test_exits_with_error_if_dataset_not_found( + self, remote_dataset: RemoteDataset, capsys + ): + test_args = [ + "darwin", + "report", + "annotators", + "--start", + "2024-11-04T00:00:00Z", + "--stop", + "2025-05-01T00:00:00+02:00", + "--group-by", + "stages,annotators,datasets", + "--datasets", + f"{remote_dataset.slug},non-existent-dataset", + ] + + with ( + patch.object(sys, "argv", test_args), + patch.object(Client, "list_remote_datasets", return_value=[remote_dataset]), + ): + args, parser = Options().parse_args() + + with pytest.raises(SystemExit): + cli._run(args, parser) + + captured = capsys.readouterr() + assert ( + "Error: Datasets '['non-existent-dataset']' do not exist." + in captured.out + ) + + def test_raises_if_invalid_grouping_option_supplied( + self, remote_dataset: RemoteDataset, capsys + ): + test_args = [ + "darwin", + "report", + "annotators", + "--start", + "2024-11-04T00:00:00Z", + "--stop", + "2025-05-01T00:00:00+02:00", + "--group-by", + "annotators,bad-grouping-option", + ] + + with ( + patch.object(sys, "argv", test_args), + patch.object(Client, "list_remote_datasets", return_value=[remote_dataset]), + ): + args, parser = Options().parse_args() + + with pytest.raises( + ValueError, + match="'bad-grouping-option' is not a valid AnnotatorReportGrouping", + ): + cli._run(args, parser) diff --git a/tests/darwin/client_test.py b/tests/darwin/client_test.py index 3bb3cc070..ad8cf43d8 100644 --- a/tests/darwin/client_test.py +++ b/tests/darwin/client_test.py @@ -1,10 +1,20 @@ +from datetime import datetime, timezone from pathlib import Path -from typing import List +from typing import List, Optional +from unittest.mock import Mock, patch import pytest import responses - -from darwin.client import Client +from requests import HTTPError, Response +from responses.registries import OrderedRegistry +from tenacity import RetryError, stop_after_attempt + +from darwin.client import ( + MAX_RETRIES, + Client, + JobPendingException, + 
AnnotatorReportGrouping, +) from darwin.config import Config from darwin.dataset.remote_dataset import RemoteDataset from darwin.dataset.remote_dataset_v2 import RemoteDatasetV2 @@ -15,12 +25,6 @@ from tests.fixtures import * # noqa: F401, F403 -from unittest.mock import Mock, patch -from requests import Response, HTTPError -from darwin.client import MAX_RETRIES -from tenacity import RetryError - - @pytest.fixture def darwin_client( darwin_config_path: Path, darwin_datasets_path: Path, team_slug_darwin_json_v2: str @@ -680,3 +684,291 @@ def test_retry_respects_rate_limit_headers(self, mock_sleep, client): assert mock_get.call_count == MAX_RETRIES assert mock_sleep.called + + +@pytest.mark.usefixtures("file_read_write_test") +class TestGetAnnotatorsReport: + @pytest.mark.parametrize( + "start,stop", + [ + (datetime(2024, 1, 1), datetime(2024, 1, 31, tzinfo=timezone.utc)), + (datetime(2024, 1, 1, tzinfo=timezone.utc), datetime(2024, 1, 31)), + (datetime(2024, 1, 1), datetime(2024, 1, 31)), + ], + ) + def test_raises_if_start_or_stop_datetime_not_timezone_aware( + self, + darwin_client: Client, + start: datetime, + stop: datetime, + ) -> None: + dataset_id = 123 + + with pytest.raises( + ValueError, match="start and stop parameters must be timezone aware" + ): + darwin_client.get_annotators_report( + [dataset_id], + start, + stop, + group_by=[], + ) + + def test_raises_if_no_group_by_options_provided( + self, darwin_client: Client + ) -> None: + dataset_id = 123 + start_time = datetime(2024, 1, 1, tzinfo=timezone.utc) + end_time = datetime(2024, 1, 31, tzinfo=timezone.utc) + + with pytest.raises( + ValueError, match="At least one group_by option is required" + ): + darwin_client.get_annotators_report( + [dataset_id], + start_time, + end_time, + group_by=[], + ) + + @responses.activate + def test_raises_if_failed_to_start_report_generation( + self, darwin_client: Client, team_slug_darwin_json_v2: str + ) -> None: + dataset_id = 123 + start_time = datetime(2024, 1, 1, 
tzinfo=timezone.utc) + end_time = datetime(2024, 1, 31, tzinfo=timezone.utc) + + start_job_endpoint_mock = responses.add( + responses.POST, + darwin_client.url + + f"/v3/teams/{team_slug_darwin_json_v2}/reports/annotator/jobs", + status=400, + ) + + with pytest.raises(HTTPError): + darwin_client.get_annotators_report( + [dataset_id], + start_time, + end_time, + [AnnotatorReportGrouping.ANNOTATORS], + team_slug_darwin_json_v2, + ) + + assert start_job_endpoint_mock.call_count == 1 + + @responses.activate + def test_raises_if_report_generation_job_fails( + self, darwin_client: Client, team_slug_darwin_json_v2: str + ) -> None: + dataset_id = 123 + start_time = datetime(2024, 1, 1, tzinfo=timezone.utc) + end_time = datetime(2024, 1, 31, tzinfo=timezone.utc) + job_id = "5332fd45-3084-46d0-9916-171f6e0f808e" + + start_job_endpoint_mock = self.job_start_endpoint_mock( + darwin_client.url, team_slug_darwin_json_v2, job_id, dataset_id + ) + get_job_status_endpoint_mock = self.job_status_endpoint_mock( + darwin_client.url, + team_slug_darwin_json_v2, + job_id, + job_status="failed", + ) + + with pytest.raises( + ValueError, match="Building an annotator report failed, try again later." 
+ ): + darwin_client.get_annotators_report( + [dataset_id], + start_time, + end_time, + [AnnotatorReportGrouping.ANNOTATORS], + team_slug_darwin_json_v2, + ) + + assert start_job_endpoint_mock.call_count == 1 + assert get_job_status_endpoint_mock.call_count == 1 + + @responses.activate(registry=OrderedRegistry) + def test_raises_if_started_job_cannot_be_queried( + self, + darwin_client: Client, + team_slug_darwin_json_v2: str, + ) -> None: + dataset_id = 123 + start_time = datetime(2024, 1, 1, tzinfo=timezone.utc) + end_time = datetime(2024, 1, 31, tzinfo=timezone.utc) + job_id = "5332fd45-3084-46d0-9916-171f6e0f808e" + + start_job_endpoint_mock = self.job_start_endpoint_mock( + darwin_client.url, team_slug_darwin_json_v2, job_id, dataset_id + ) + get_job_status_endpoint_mock = self.job_status_endpoint_mock( + darwin_client.url, + team_slug_darwin_json_v2, + job_id, + status_code=404, + ) + + with pytest.raises(NotFound): + with patch("tenacity.nap.time.sleep", return_value=None) as mock_sleep: + darwin_client.get_annotators_report( + [dataset_id], + start_time, + end_time, + [AnnotatorReportGrouping.ANNOTATORS], + team_slug_darwin_json_v2, + ) + assert mock_sleep.call_count == 1 + + assert start_job_endpoint_mock.call_count == 1 + assert get_job_status_endpoint_mock.call_count == 1 + + @responses.activate() + def test_raises_and_provides_job_status_url_if_polling_times_out( + self, + darwin_client: Client, + team_slug_darwin_json_v2: str, + ) -> None: + dataset_id = 123 + start_time = datetime(2024, 1, 1, tzinfo=timezone.utc) + end_time = datetime(2024, 1, 31, tzinfo=timezone.utc) + job_id = "5332fd45-3084-46d0-9916-171f6e0f808e" + report_url = f"http://localhost/s/data/teams/1/reports/annotator_2025-03-14T23%3A00%3A00.000Z_{job_id}.csv" + + start_job_endpoint_mock = self.job_start_endpoint_mock( + darwin_client.url, team_slug_darwin_json_v2, job_id, dataset_id + ) + get_job_status_endpoint_mock = self.job_status_endpoint_mock( + darwin_client.url, 
team_slug_darwin_json_v2, job_id, "running" + ) + get_report_file_endpoint_mock = responses.add( + responses.GET, report_url, status=200 + ) + + with pytest.raises( + JobPendingException, match="timed out, job status can be requested manually" + ): + with patch("tenacity.nap.time.sleep", return_value=None) as mock_sleep: + darwin_client.poll_pending_report_job.retry.stop = stop_after_attempt(2) + darwin_client.get_annotators_report( + [dataset_id], + start_time, + end_time, + [AnnotatorReportGrouping.ANNOTATORS], + team_slug_darwin_json_v2, + ) + assert mock_sleep.call_count == 1 + + assert start_job_endpoint_mock.call_count == 1 + assert get_job_status_endpoint_mock.call_count == 2 + assert get_report_file_endpoint_mock.call_count == 0 + + @pytest.mark.parametrize("job_status", ["pending", "running"]) + @responses.activate(registry=OrderedRegistry) + def test_retries_if_report_generation_job_is_not_finished( + self, darwin_client: Client, team_slug_darwin_json_v2: str, job_status: str + ) -> None: + dataset_id = 123 + start_time = datetime(2024, 1, 1, tzinfo=timezone.utc) + end_time = datetime(2024, 1, 31, tzinfo=timezone.utc) + job_id = "5332fd45-3084-46d0-9916-171f6e0f808e" + report_url = f"http://localhost/s/data/teams/1/reports/annotator_2025-03-14T23%3A00%3A00.000Z_{job_id}.csv" + + start_job_endpoint_mock = self.job_start_endpoint_mock( + darwin_client.url, team_slug_darwin_json_v2, job_id, dataset_id + ) + get_job_status_endpoint_mock = self.job_status_endpoint_mock( + darwin_client.url, team_slug_darwin_json_v2, job_id, job_status + ) + get_job_status_endpoint_mock_2 = self.job_status_endpoint_mock( + darwin_client.url, + team_slug_darwin_json_v2, + job_id, + job_status="finished", + report_url=report_url, + ) + get_report_file_endpoint_mock = responses.add( + responses.GET, + report_url, + body="some,csv,report", + status=200, + ) + + with patch("tenacity.nap.time.sleep", return_value=None) as mock_sleep: + darwin_client.get_annotators_report( + 
[dataset_id], + start_time, + end_time, + [AnnotatorReportGrouping.ANNOTATORS], + team_slug_darwin_json_v2, + ) + assert mock_sleep.call_count == 1 + + assert start_job_endpoint_mock.call_count == 1 + assert get_job_status_endpoint_mock.call_count == 1 + assert get_job_status_endpoint_mock_2.call_count == 1 + assert get_report_file_endpoint_mock.call_count == 1 + + def job_start_endpoint_mock( + self, + url: str, + team_slug: str, + job_id: str, + dataset_id: int, + status_code: int = 200, + ): + return responses.add( + responses.POST, + url + f"/v3/teams/{team_slug}/reports/annotator/jobs", + json={ + "id": job_id, + "status": "pending", + "format": "csv", + "url": None, + "team_id": 1, + }, + status=status_code, + match=[ + responses.matchers.json_params_matcher( + { + "start": "2024-01-01T00:00:00+00:00", + "stop": "2024-01-31T00:00:00+00:00", + "dataset_ids": [dataset_id], + "group_by": ["annotators"], + "format": "csv", + "metrics": [ + "active_time", + "total_annotations", + "total_items_annotated", + "time_per_item", + "time_per_annotation", + "review_pass_rate", + ], + }, + ), + ], + ) + + def job_status_endpoint_mock( + self, + url: str, + team_slug: str, + job_id: str, + job_status: str = "pending", + status_code: int = 200, + report_url: Optional[str] = None, + ): + return responses.add( + responses.GET, + url + f"/v3/teams/{team_slug}/reports/annotator/jobs/{job_id}", + json={ + "id": job_id, + "status": job_status, + "format": "csv", + "url": report_url, + "team_id": 1, + }, + status=status_code, + )