diff --git a/labelbox/schema/export_params.py b/labelbox/schema/export_params.py
index af257cfdd..234be78d5 100644
--- a/labelbox/schema/export_params.py
+++ b/labelbox/schema/export_params.py
@@ -16,7 +16,7 @@ class DataRowParams(TypedDict):
 
 class ProjectExportParams(DataRowParams):
     project_details: Optional[bool]
-    labels: Optional[bool]
+    label_details: Optional[bool]
     performance_details: Optional[bool]
 
 
diff --git a/labelbox/schema/model_run.py b/labelbox/schema/model_run.py
index 48e118703..5d4223c65 100644
--- a/labelbox/schema/model_run.py
+++ b/labelbox/schema/model_run.py
@@ -487,10 +487,10 @@ def export_v2(self,
                     # Arguments locked based on exectuion context
                     "includeProjectDetails":
                         False,
-                    "includeLabels":
-                        False,
                     "includePerformanceDetails":
                         False,
+                    "includeLabelDetails":
+                        False
                 },
             }
         }
diff --git a/labelbox/schema/project.py b/labelbox/schema/project.py
index 840dc0cd3..05f8f4646 100644
--- a/labelbox/schema/project.py
+++ b/labelbox/schema/project.py
@@ -406,8 +406,8 @@ def export_v2(self,
             "metadata_fields": False,
             "data_row_details": False,
             "project_details": False,
-            "labels": False,
-            "performance_details": False
+            "performance_details": False,
+            "label_details": False
         })
 
         mutation_name = "exportDataRowsInProject"
@@ -431,10 +431,10 @@ def export_v2(self,
                         _params.get('data_row_details', False),
                     "includeProjectDetails":
                         _params.get('project_details', False),
-                    "includeLabels":
-                        _params.get('labels', False),
                     "includePerformanceDetails":
                         _params.get('performance_details', False),
+                    "includeLabelDetails":
+                        _params.get('label_details', False)
                 },
             }
         }
diff --git a/tests/integration/annotation_import/test_model_run.py b/tests/integration/annotation_import/test_model_run.py
index 00f627eca..3d0927fb5 100644
--- a/tests/integration/annotation_import/test_model_run.py
+++ b/tests/integration/annotation_import/test_model_run.py
@@ -117,7 +117,51 @@ def test_model_run_export_labels(model_run_with_model_run_data_rows):
     assert len(labels) == 3
 
 
-@pytest.mark.skip(reason="feature under development")
+@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
+                    reason="does not work for onprem")
+def test_model_run_status(model_run_with_model_run_data_rows):
+
+    def get_model_run_status():
+        return model_run_with_model_run_data_rows.client.execute(
+            """query trainingPipelinePyApi($modelRunId: ID!) {
+            trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
+        """, {'modelRunId': model_run_with_model_run_data_rows.uid},
+            experimental=True)['trainingPipeline']
+
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] is None
+    assert model_run_status['metadata'] is None
+    assert model_run_status['errorMessage'] is None
+
+    status = "COMPLETE"
+    metadata = {'key1': 'value1'}
+    errorMessage = "an error"
+    model_run_with_model_run_data_rows.update_status(status, metadata,
+                                                     errorMessage)
+
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status
+    assert model_run_status['metadata'] == metadata
+    assert model_run_status['errorMessage'] == errorMessage
+
+    extra_metadata = {'key2': 'value2'}
+    model_run_with_model_run_data_rows.update_status(status, extra_metadata)
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status
+    assert model_run_status['metadata'] == {**metadata, **extra_metadata}
+    assert model_run_status['errorMessage'] == errorMessage
+
+    status = ModelRun.Status.FAILED
+    model_run_with_model_run_data_rows.update_status(status, metadata,
+                                                     errorMessage)
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status.value
+
+    with pytest.raises(ValueError):
+        model_run_with_model_run_data_rows.update_status(
+            "INVALID", metadata, errorMessage)
+
+
 def test_model_run_export_v2(model_run_with_model_run_data_rows,
                              configured_project):
     task_name = "test_task"
@@ -164,51 +208,6 @@ def download_result(result_url):
     assert prediction_id in label_ids_set
 
 
-@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
-                    reason="does not work for onprem")
-def test_model_run_status(model_run_with_model_run_data_rows):
-
-    def get_model_run_status():
-        return model_run_with_model_run_data_rows.client.execute(
-            """query trainingPipelinePyApi($modelRunId: ID!) {
-            trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
-        """, {'modelRunId': model_run_with_model_run_data_rows.uid},
-            experimental=True)['trainingPipeline']
-
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] is None
-    assert model_run_status['metadata'] is None
-    assert model_run_status['errorMessage'] is None
-
-    status = "COMPLETE"
-    metadata = {'key1': 'value1'}
-    errorMessage = "an error"
-    model_run_with_model_run_data_rows.update_status(status, metadata,
-                                                     errorMessage)
-
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status
-    assert model_run_status['metadata'] == metadata
-    assert model_run_status['errorMessage'] == errorMessage
-
-    extra_metadata = {'key2': 'value2'}
-    model_run_with_model_run_data_rows.update_status(status, extra_metadata)
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status
-    assert model_run_status['metadata'] == {**metadata, **extra_metadata}
-    assert model_run_status['errorMessage'] == errorMessage
-
-    status = ModelRun.Status.FAILED
-    model_run_with_model_run_data_rows.update_status(status, metadata,
-                                                     errorMessage)
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status.value
-
-    with pytest.raises(ValueError):
-        model_run_with_model_run_data_rows.update_status(
-            "INVALID", metadata, errorMessage)
-
-
 def test_model_run_split_assignment(model_run, dataset, image_url):
     n_data_rows = 10
     data_rows = dataset.create_data_rows([{
diff --git a/tests/integration/test_project.py b/tests/integration/test_project.py
index 206cab208..68483b8d8 100644
--- a/tests/integration/test_project.py
+++ b/tests/integration/test_project.py
@@ -42,7 +42,6 @@ def test_project(client, rand_gen):
     assert project not in projects
 
 
-@pytest.mark.skip(reason="feature under development")
def test_project_export_v2(configured_project_with_label):
     project, _, _, label = configured_project_with_label
     label_id = label.uid
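
For reviewers, a minimal usage sketch of the renamed export parameter follows. Only the `label_details` key (formerly `labels`) is taken directly from this diff; the API key, project ID, and the task interface (`wait_till_done()`, `result`) are assumptions about the surrounding SDK, not part of this change.

```python
# Sketch only: credentials, the project id, and the returned task's interface
# are assumptions; the "label_details" key (formerly "labels") comes from this diff.
import labelbox as lb

client = lb.Client(api_key="<YOUR_API_KEY>")   # hypothetical credentials
project = client.get_project("<PROJECT_ID>")   # hypothetical project id

export_task = project.export_v2(params={
    "data_row_details": True,
    "project_details": True,
    "performance_details": False,
    "label_details": True,  # renamed from "labels" in this change
})

export_task.wait_till_done()
print(export_task.result)
```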