From 9963a0f3067724cc801eb7131f523a9a2873c81d Mon Sep 17 00:00:00 2001
From: Val Brodsky
Date: Wed, 1 Mar 2023 14:29:34 -0800
Subject: [PATCH 1/4] Add logic to upload labels by project id to model run

---
 labelbox/schema/model_run.py | 55 ++++++++++++++++++++++++++++++++----
 1 file changed, 49 insertions(+), 6 deletions(-)

diff --git a/labelbox/schema/model_run.py b/labelbox/schema/model_run.py
index 214a2faf5..18a5ad932 100644
--- a/labelbox/schema/model_run.py
+++ b/labelbox/schema/model_run.py
@@ -47,22 +47,41 @@ class Status(Enum):
         COMPLETE = "COMPLETE"
         FAILED = "FAILED"
 
-    def upsert_labels(self, label_ids, timeout_seconds=3600):
+    def upsert_labels(self,
+                     label_ids: Optional[List[str]] = None,
+                     project_id: Optional[str] = None,
+                     timeout_seconds=3600):
         """ Adds data rows and labels to a Model Run
         Args:
             label_ids (list): label ids to insert
+            project_id (string): project uuid, all project labels will be uploaded
             timeout_seconds (float): Max waiting time, in seconds.
         Returns:
             ID of newly generated async task
         """
-        if len(label_ids) < 1:
-            raise ValueError("Must provide at least one label id")
+        use_label_ids = label_ids is not None and len(label_ids) > 0
+        use_project_id = project_id is not None
+
+        if not use_label_ids and not use_project_id:
+            raise ValueError("Must provide at least one label id or a project id")
+
+        if use_label_ids and use_project_id:
+            raise ValueError("Must provide only one of label ids or project id")
+
+        if use_label_ids:
+            return self._upsert_labels_by_label_ids(label_ids)
+        else: # use_project_id
+            return self._upsert_labels_by_project_id(project_id)
+
+
+
+    def _upsert_labels_by_label_ids(self,
+                                        label_ids: List[str]):
         mutation_name = 'createMEAModelRunLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunLabelRegistrationTaskPyApi($modelRunId: ID!, $labelIds : [ID!]!) {
-          %s(where : { id : $modelRunId}, data : {labelIds: $labelIds})}
-          """ % (mutation_name)
+        %s(where : { id : $modelRunId}, data : {labelIds: $labelIds})}
+        """ % (mutation_name)
 
         res = self.client.execute(create_task_query_str, {
             'modelRunId': self.uid,
@@ -78,7 +97,31 @@ def upsert_labels(self, label_ids, timeout_seconds=3600):
             status_query_str, {'where': {
                 'id': task_id
             }})['MEALabelRegistrationTaskStatus'],
-                                     timeout_seconds=timeout_seconds)
+            timeout_seconds=timeout_seconds)
+
+    def _upsert_labels_by_project_id(self,
+                                         project_id: str):
+        mutation_name = 'createMEAModelRunProjectLabelRegistrationTask'
+        create_task_query_str = """mutation createMEAModelRunProjectLabelRegistrationTaskPyApi($modelRunId: ID!, $projectId : ID!) {
+          %s(where : { modelRunId : $modelRunId, projectId: $projectId}}
+          """ % (mutation_name)
+
+        res = self.client.execute(create_task_query_str, {
+            'modelRunId': self.uid,
+            'projectId': project_id
+        })
+        task_id = res[mutation_name]
+
+        status_query_str = """query MEALabelRegistrationTaskStatusPyApi($where: WhereUniqueIdInput!){
+            MEALabelRegistrationTaskStatus(where: $where) {status errorMessage}
+        }
+        """
+        return self._wait_until_done(lambda: self.client.execute(
+            status_query_str, {'where': {
+                'id': task_id
+            }})['MEALabelRegistrationTaskStatus'],
+            timeout_seconds=timeout_seconds)
+
 
     def upsert_data_rows(self,
                          data_row_ids=None,

From 303a3be64aecabfc9a6fe63d8ddc3ca88b2fb373 Mon Sep 17 00:00:00 2001
From: Val Brodsky
Date: Thu, 2 Mar 2023 12:47:57 -0800
Subject: [PATCH 2/4] Add tests

---
 labelbox/schema/model_run.py                    |  8 +++++---
 tests/integration/annotation_import/conftest.py | 14 ++++++++++++++
 .../test_mea_prediction_import.py               | 13 +++++++++++++
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/labelbox/schema/model_run.py b/labelbox/schema/model_run.py
index 18a5ad932..b9f51e1fe 100644
--- a/labelbox/schema/model_run.py
+++ b/labelbox/schema/model_run.py
@@ -77,7 +77,8 @@ def upsert_labels(self,
 
 
     def _upsert_labels_by_label_ids(self,
-                                        label_ids: List[str]):
+                                        label_ids: List[str],
+                                        timeout_seconds=3600):
         mutation_name = 'createMEAModelRunLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunLabelRegistrationTaskPyApi($modelRunId: ID!, $labelIds : [ID!]!) {
         %s(where : { id : $modelRunId}, data : {labelIds: $labelIds})}
@@ -100,10 +101,11 @@ def _upsert_labels_by_label_ids(self,
             timeout_seconds=timeout_seconds)
 
     def _upsert_labels_by_project_id(self,
-                                         project_id: str):
+                                         project_id: str,
+                                         timeout_seconds=3600):
         mutation_name = 'createMEAModelRunProjectLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunProjectLabelRegistrationTaskPyApi($modelRunId: ID!, $projectId : ID!) {
-          %s(where : { modelRunId : $modelRunId, projectId: $projectId}}
+          %s(where : { modelRunId : $modelRunId, projectId: $projectId})}
           """ % (mutation_name)
 
         res = self.client.execute(create_task_query_str, {
diff --git a/tests/integration/annotation_import/conftest.py b/tests/integration/annotation_import/conftest.py
index 424373d58..2c3296da3 100644
--- a/tests/integration/annotation_import/conftest.py
+++ b/tests/integration/annotation_import/conftest.py
@@ -439,6 +439,20 @@ def model_run_with_model_run_data_rows(client, configured_project,
     model_run.delete()
     # TODO: Delete resources when that is possible ..
 
+@pytest.fixture
+def model_run_with_model_run_all_project_data_rows(client, configured_project,
+                                                   model_run_predictions, model_run):
+    configured_project.enable_model_assisted_labeling()
+
+    upload_task = LabelImport.create_from_objects(
+        client, configured_project.uid, f"label-import-{uuid.uuid4()}",
+        model_run_predictions)
+    upload_task.wait_until_done()
+    model_run.upsert_labels(project_id=configured_project.uid)
+    time.sleep(3)
+    yield model_run
+    model_run.delete()
+    # TODO: Delete resources when that is possible ..
 
 
 class AnnotationImportTestHelpers:
 
diff --git a/tests/integration/annotation_import/test_mea_prediction_import.py b/tests/integration/annotation_import/test_mea_prediction_import.py
index 58a90fc38..78966147d 100644
--- a/tests/integration/annotation_import/test_mea_prediction_import.py
+++ b/tests/integration/annotation_import/test_mea_prediction_import.py
@@ -37,6 +37,19 @@ def test_create_from_objects(model_run_with_model_run_data_rows,
         annotation_import.input_file_url, object_predictions)
     annotation_import.wait_until_done()
 
+def test_create_from_objects_all_project_labels(model_run_with_model_run_all_project_data_rows,
+                                                object_predictions,
+                                                annotation_import_test_helpers):
+    name = str(uuid.uuid4())
+
+    annotation_import = model_run_with_model_run_all_project_data_rows.add_predictions(
+        name=name, predictions=object_predictions)
+
+    assert annotation_import.model_run_id == model_run_with_model_run_all_project_data_rows.uid
+    annotation_import_test_helpers.check_running_state(annotation_import, name)
+    annotation_import_test_helpers.assert_file_content(
+        annotation_import.input_file_url, object_predictions)
+    annotation_import.wait_until_done()
 
 def test_create_from_label_objects(model_run_with_model_run_data_rows,
                                    object_predictions,

From f1633c034cc7365aeb223e1a23ec8556abc536ff Mon Sep 17 00:00:00 2001
From: Val Brodsky
Date: Thu, 2 Mar 2023 13:00:19 -0800
Subject: [PATCH 3/4] YAPF

---
 labelbox/schema/model_run.py                  | 26 +++++++++----------
 .../integration/annotation_import/conftest.py |  5 +++-
 .../test_mea_prediction_import.py             |  8 +++---
 3 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/labelbox/schema/model_run.py b/labelbox/schema/model_run.py
index b9f51e1fe..1046cfe78 100644
--- a/labelbox/schema/model_run.py
+++ b/labelbox/schema/model_run.py
@@ -48,9 +48,9 @@ class Status(Enum):
         FAILED = "FAILED"
 
     def upsert_labels(self,
-                     label_ids: Optional[List[str]] = None,
-                     project_id: Optional[str] = None,
-                     timeout_seconds=3600):
+                      label_ids: Optional[List[str]] = None,
+                      project_id: Optional[str] = None,
+                      timeout_seconds=3600):
         """ Adds data rows and labels to a Model Run
         Args:
             label_ids (list): label ids to insert
@@ -64,21 +64,20 @@ def upsert_labels(self,
         use_project_id = project_id is not None
 
         if not use_label_ids and not use_project_id:
-            raise ValueError("Must provide at least one label id or a project id")
+            raise ValueError(
+                "Must provide at least one label id or a project id")
 
         if use_label_ids and use_project_id:
             raise ValueError("Must provide only one of label ids or project id")
 
         if use_label_ids:
             return self._upsert_labels_by_label_ids(label_ids)
-        else: # use_project_id
+        else:  # use_project_id
             return self._upsert_labels_by_project_id(project_id)
 
-
-
     def _upsert_labels_by_label_ids(self,
-                                        label_ids: List[str],
-                                        timeout_seconds=3600):
+                                    label_ids: List[str],
+                                    timeout_seconds=3600):
         mutation_name = 'createMEAModelRunLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunLabelRegistrationTaskPyApi($modelRunId: ID!, $labelIds : [ID!]!) {
         %s(where : { id : $modelRunId}, data : {labelIds: $labelIds})}
@@ -98,11 +97,11 @@ def _upsert_labels_by_label_ids(self,
             status_query_str, {'where': {
                 'id': task_id
             }})['MEALabelRegistrationTaskStatus'],
-            timeout_seconds=timeout_seconds)
+                                     timeout_seconds=timeout_seconds)
 
     def _upsert_labels_by_project_id(self,
-                                         project_id: str,
-                                         timeout_seconds=3600):
+                                     project_id: str,
+                                     timeout_seconds=3600):
         mutation_name = 'createMEAModelRunProjectLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunProjectLabelRegistrationTaskPyApi($modelRunId: ID!, $projectId : ID!) {
           %s(where : { modelRunId : $modelRunId, projectId: $projectId})}
diff --git a/tests/integration/annotation_import/conftest.py b/tests/integration/annotation_import/conftest.py
index 2c3296da3..4d4ef243c 100644
--- a/tests/integration/annotation_import/conftest.py
+++ b/tests/integration/annotation_import/conftest.py
@@ -439,9 +439,11 @@ def model_run_with_model_run_data_rows(client, configured_project,
     model_run.delete()
     # TODO: Delete resources when that is possible ..
 
+
 @pytest.fixture
 def model_run_with_model_run_all_project_data_rows(client, configured_project,
-                                                   model_run_predictions, model_run):
+                                                   model_run_predictions,
+                                                   model_run):
     configured_project.enable_model_assisted_labeling()
 
     upload_task = LabelImport.create_from_objects(
@@ -454,6 +456,7 @@ def model_run_with_model_run_all_project_data_rows(client, configured_project,
     model_run.delete()
     # TODO: Delete resources when that is possible ..
 
+
 class AnnotationImportTestHelpers:
 
     @classmethod
diff --git a/tests/integration/annotation_import/test_mea_prediction_import.py b/tests/integration/annotation_import/test_mea_prediction_import.py
index 78966147d..bbaf9ec29 100644
--- a/tests/integration/annotation_import/test_mea_prediction_import.py
+++ b/tests/integration/annotation_import/test_mea_prediction_import.py
@@ -37,9 +37,10 @@ def test_create_from_objects(model_run_with_model_run_data_rows,
         annotation_import.input_file_url, object_predictions)
     annotation_import.wait_until_done()
 
-def test_create_from_objects_all_project_labels(model_run_with_model_run_all_project_data_rows,
-                                                object_predictions,
-                                                annotation_import_test_helpers):
+
+def test_create_from_objects_all_project_labels(
+        model_run_with_model_run_all_project_data_rows, object_predictions,
+        annotation_import_test_helpers):
     name = str(uuid.uuid4())
 
     annotation_import = model_run_with_model_run_all_project_data_rows.add_predictions(
@@ -51,6 +52,7 @@ def test_create_from_objects_all_project_labels(model_run_with_model_run_all_pro
         annotation_import.input_file_url, object_predictions)
     annotation_import.wait_until_done()
 
+
 def test_create_from_label_objects(model_run_with_model_run_data_rows,
                                    object_predictions,
                                    annotation_import_test_helpers):

From 978cb460e8e809e40f5cac41761d6ea84ea59df9 Mon Sep 17 00:00:00 2001
From: Val Brodsky
Date: Sun, 5 Mar 2023 08:05:38 -0800
Subject: [PATCH 4/4] PR: added extra test to verify project labels in a model run

---
 labelbox/schema/model_run.py                  | 17 ++++++-----
 .../integration/annotation_import/conftest.py |  5 ++--
 .../test_mea_prediction_import.py             | 29 +++++++++++++++++--
 3 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/labelbox/schema/model_run.py b/labelbox/schema/model_run.py
index 1046cfe78..6bf993a37 100644
--- a/labelbox/schema/model_run.py
+++ b/labelbox/schema/model_run.py
@@ -55,9 +55,11 @@ def upsert_labels(self,
         Args:
             label_ids (list): label ids to insert
             project_id (string): project uuid, all project labels will be uploaded
+            Either label_ids OR project_id is required but NOT both
             timeout_seconds (float): Max waiting time, in seconds.
         Returns:
             ID of newly generated async task
+
         """
 
         use_label_ids = label_ids is not None and len(label_ids) > 0
@@ -71,13 +73,13 @@ def upsert_labels(self,
             raise ValueError("Must provide only one of label ids or project id")
 
         if use_label_ids:
-            return self._upsert_labels_by_label_ids(label_ids)
+            return self._upsert_labels_by_label_ids(label_ids, timeout_seconds)
         else:  # use_project_id
-            return self._upsert_labels_by_project_id(project_id)
+            return self._upsert_labels_by_project_id(project_id,
+                                                     timeout_seconds)
 
-    def _upsert_labels_by_label_ids(self,
-                                    label_ids: List[str],
-                                    timeout_seconds=3600):
+    def _upsert_labels_by_label_ids(self, label_ids: List[str],
+                                    timeout_seconds: int):
         mutation_name = 'createMEAModelRunLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunLabelRegistrationTaskPyApi($modelRunId: ID!, $labelIds : [ID!]!) {
         %s(where : { id : $modelRunId}, data : {labelIds: $labelIds})}
@@ -99,9 +101,8 @@ def _upsert_labels_by_label_ids(self,
             }})['MEALabelRegistrationTaskStatus'],
                                      timeout_seconds=timeout_seconds)
 
-    def _upsert_labels_by_project_id(self,
-                                     project_id: str,
-                                     timeout_seconds=3600):
+    def _upsert_labels_by_project_id(self, project_id: str,
+                                     timeout_seconds: int):
         mutation_name = 'createMEAModelRunProjectLabelRegistrationTask'
         create_task_query_str = """mutation createMEAModelRunProjectLabelRegistrationTaskPyApi($modelRunId: ID!, $projectId : ID!) {
           %s(where : { modelRunId : $modelRunId, projectId: $projectId})}
diff --git a/tests/integration/annotation_import/conftest.py b/tests/integration/annotation_import/conftest.py
index 4d4ef243c..e8cc860e1 100644
--- a/tests/integration/annotation_import/conftest.py
+++ b/tests/integration/annotation_import/conftest.py
@@ -441,9 +441,8 @@ def model_run_with_model_run_data_rows(client, configured_project,
 
 
 @pytest.fixture
-def model_run_with_model_run_all_project_data_rows(client, configured_project,
-                                                   model_run_predictions,
-                                                   model_run):
+def model_run_with_all_project_labels(client, configured_project,
+                                      model_run_predictions, model_run):
     configured_project.enable_model_assisted_labeling()
 
     upload_task = LabelImport.create_from_objects(
diff --git a/tests/integration/annotation_import/test_mea_prediction_import.py b/tests/integration/annotation_import/test_mea_prediction_import.py
index bbaf9ec29..afca122b6 100644
--- a/tests/integration/annotation_import/test_mea_prediction_import.py
+++ b/tests/integration/annotation_import/test_mea_prediction_import.py
@@ -39,20 +39,43 @@ def test_create_from_objects(model_run_with_model_run_data_rows,
 
 
 def test_create_from_objects_all_project_labels(
-        model_run_with_model_run_all_project_data_rows, object_predictions,
+        model_run_with_all_project_labels, object_predictions,
         annotation_import_test_helpers):
     name = str(uuid.uuid4())
 
-    annotation_import = model_run_with_model_run_all_project_data_rows.add_predictions(
+    annotation_import = model_run_with_all_project_labels.add_predictions(
         name=name, predictions=object_predictions)
 
-    assert annotation_import.model_run_id == model_run_with_model_run_all_project_data_rows.uid
+    assert annotation_import.model_run_id == model_run_with_all_project_labels.uid
     annotation_import_test_helpers.check_running_state(annotation_import, name)
     annotation_import_test_helpers.assert_file_content(
         annotation_import.input_file_url, object_predictions)
     annotation_import.wait_until_done()
 
 
+def test_model_run_project_labels(model_run_with_all_project_labels,
+                                  model_run_predictions):
+    model_run = model_run_with_all_project_labels
+    model_run_exported_labels = model_run.export_labels(download=True)
+    labels_indexed_by_schema_id = {}
+    for label in model_run_exported_labels:
+        # assuming the exported array of label 'objects' has only one label per data row... as is usually the case when there are no label revisions
+        schema_id = label['Label']['objects'][0]['schemaId']
+        labels_indexed_by_schema_id[schema_id] = label
+
+    assert (len(
+        labels_indexed_by_schema_id.keys())) == len(model_run_predictions)
+
+    # making sure the labels in this model run are all the labels uploaded to the project
+    # by comparing some 'immutable' attributes
+    for expected_label in model_run_predictions:
+        schema_id = expected_label['schemaId']
+        actual_label = labels_indexed_by_schema_id[schema_id]
+        assert actual_label['Label']['objects'][0]['title'] == expected_label[
+            'name']
+        assert actual_label['DataRow ID'] == expected_label['dataRow']['id']
+
+
 def test_create_from_label_objects(model_run_with_model_run_data_rows,
                                    object_predictions,
                                    annotation_import_test_helpers):
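
Usage sketch for the API these patches introduce: a minimal example of calling ModelRun.upsert_labels() with a project id instead of explicit label ids, as added in PATCH 1/4. It assumes a labelbox Client and an already-retrieved ModelRun instance; the api key, project id, and label ids below are illustrative placeholders, and how the ModelRun is obtained is outside the scope of these patches.

    from labelbox import Client

    client = Client(api_key="<YOUR_API_KEY>")  # placeholder key

    # Assumes `model_run` is a labelbox ModelRun fetched elsewhere via the client.
    model_run = ...

    # New path added in PATCH 1/4: register every label of a project with the model run.
    model_run.upsert_labels(project_id="<PROJECT_ID>", timeout_seconds=3600)

    # Existing path: register specific labels by id.
    model_run.upsert_labels(label_ids=["<LABEL_ID_1>", "<LABEL_ID_2>"])

    # Passing both label_ids and project_id, or neither, raises ValueError,
    # per the validation added in upsert_labels().

As of PATCH 4/4, timeout_seconds is forwarded to both internal registration paths, so the same waiting behaviour applies whether labels are registered by label ids or by project id.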