From b9f2fee758f53415c6022219b5f4076035de16bd Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Wed, 2 Aug 2023 13:29:24 -0700 Subject: [PATCH 1/8] [AL-5942] exports integration tests --- .../test_mea_prediction_import.py | 1 + .../annotation_import/test_model_run.py | 49 ---- tests/integration/conftest.py | 18 +- tests/integration/export_v2/conftest.py | 258 ++++++++++++++++++ tests/integration/export_v2/test_data_rows.py | 24 ++ tests/integration/export_v2/test_dataset.py | 31 +++ .../integration/export_v2/test_exceptions.py | 34 +++ .../test_export_v2_video.py | 0 .../test_legacy_export.py} | 87 ++++++ tests/integration/export_v2/test_model_run.py | 50 ++++ tests/integration/export_v2/test_project.py | 180 ++++++++++++ .../integration/{ => export_v2}/test_slice.py | 2 +- .../test_data_row_media_attributes.py | 9 - tests/integration/test_issues.py | 20 -- tests/integration/test_label.py | 15 - tests/integration/test_project.py | 8 +- tests/integration/test_task.py | 1 + 17 files changed, 686 insertions(+), 101 deletions(-) create mode 100644 tests/integration/export_v2/conftest.py create mode 100644 tests/integration/export_v2/test_data_rows.py create mode 100644 tests/integration/export_v2/test_dataset.py create mode 100644 tests/integration/export_v2/test_exceptions.py rename tests/integration/{annotation_import => export_v2}/test_export_v2_video.py (100%) rename tests/integration/{test_export.py => export_v2/test_legacy_export.py} (62%) create mode 100644 tests/integration/export_v2/test_model_run.py create mode 100644 tests/integration/export_v2/test_project.py rename tests/integration/{ => export_v2}/test_slice.py (92%) delete mode 100644 tests/integration/test_data_row_media_attributes.py delete mode 100644 tests/integration/test_issues.py diff --git a/tests/integration/annotation_import/test_mea_prediction_import.py b/tests/integration/annotation_import/test_mea_prediction_import.py index c82701b4e..2871fba47 100644 --- a/tests/integration/annotation_import/test_mea_prediction_import.py +++ b/tests/integration/annotation_import/test_mea_prediction_import.py @@ -80,6 +80,7 @@ def test_create_from_objects_all_project_labels( def test_model_run_project_labels(model_run_with_all_project_labels, model_run_predictions): model_run = model_run_with_all_project_labels + # TODO: Move to export_v2 model_run_exported_labels = model_run.export_labels(download=True) labels_indexed_by_schema_id = {} for label in model_run_exported_labels: diff --git a/tests/integration/annotation_import/test_model_run.py b/tests/integration/annotation_import/test_model_run.py index 89fc16a19..5b8cd2474 100644 --- a/tests/integration/annotation_import/test_model_run.py +++ b/tests/integration/annotation_import/test_model_run.py @@ -7,23 +7,6 @@ from labelbox import DataSplit, ModelRun -def _model_run_export_v2_results(model_run, task_name, params, num_retries=5): - """Export model run results and retry if no results are returned.""" - while (num_retries > 0): - task = model_run.export_v2(task_name, params=params) - assert task.name == task_name - task.wait_till_done() - assert task.status == "COMPLETE" - assert task.errors is None - task_results = task.result - if len(task_results) == 0: - num_retries -= 1 - time.sleep(5) - else: - return task_results - return [] - - def test_model_run(client, configured_project_with_label, data_row, rand_gen): project, _, _, label = configured_project_with_label label_id = label.uid @@ -182,38 +165,6 @@ def get_model_run_status(): errorMessage) -def 
test_model_run_export_v2(model_run_with_data_rows, configured_project): - task_name = "test_task" - media_attributes = True - params = {"media_attributes": media_attributes, "predictions": True} - task_results = _model_run_export_v2_results(model_run_with_data_rows, - task_name, params) - label_ids = [label.uid for label in configured_project.labels()] - label_ids_set = set(label_ids) - - assert len(task_results) == len(label_ids) - - for task_result in task_results: - # Check export param handling - if media_attributes: - assert 'media_attributes' in task_result and task_result[ - 'media_attributes'] is not None - else: - assert 'media_attributes' not in task_result or task_result[ - 'media_attributes'] is None - model_run = task_result['experiments'][ - model_run_with_data_rows.model_id]['runs'][ - model_run_with_data_rows.uid] - task_label_ids_set = set( - map(lambda label: label['id'], model_run['labels'])) - task_prediction_ids_set = set( - map(lambda prediction: prediction['id'], model_run['predictions'])) - for label_id in task_label_ids_set: - assert label_id in label_ids_set - for prediction_id in task_prediction_ids_set: - assert prediction_id in label_ids_set - - def test_model_run_split_assignment_by_data_row_ids(model_run, dataset, image_url): n_data_rows = 10 diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 04153a914..3e0651706 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -11,7 +11,7 @@ import requests from labelbox import Client, MediaType -from labelbox import LabelingFrontend +from labelbox import LabelingFrontend, Dataset from labelbox import OntologyBuilder, Tool, Option, Classification, MediaType from labelbox.orm import query from labelbox.pagination import PaginatedCollection @@ -22,6 +22,7 @@ from labelbox.schema.user import User IMG_URL = "https://picsum.photos/200/300.jpg" +SMALL_DATASET_URL = "https://storage.googleapis.com/diagnostics-demo-data/coco/COCO_train2014_000000000034.jpg" DATA_ROW_PROCESSING_WAIT_TIMEOUT_SECONDS = 30 DATA_ROW_PROCESSING_WAIT_SLEEP_INTERNAL_SECONDS = 5 @@ -248,7 +249,20 @@ def unique_dataset(client, rand_gen): @pytest.fixture -def data_row(dataset, image_url, rand_gen): +def small_dataset(dataset: Dataset): + task = dataset.create_data_rows([ + { + "row_data": SMALL_DATASET_URL, + "external_id": "my-image" + }, + ] * 3) + task.wait_till_done() + + yield dataset + + +@pytest.fixture +def data_row(dataset, image_url): task = dataset.create_data_rows([ { "row_data": image_url, diff --git a/tests/integration/export_v2/conftest.py b/tests/integration/export_v2/conftest.py new file mode 100644 index 000000000..b0edf4512 --- /dev/null +++ b/tests/integration/export_v2/conftest.py @@ -0,0 +1,258 @@ +import pytest +from labelbox.schema.queue_mode import QueueMode +from labelbox.schema.media_type import MediaType +from labelbox.schema.labeling_frontend import LabelingFrontend + + +@pytest.fixture +def ontology(): + bbox_tool_with_nested_text = { + 'required': + False, + 'name': + 'bbox_tool_with_nested_text', + 'tool': + 'rectangle', + 'color': + '#a23030', + 'classifications': [{ + 'required': + False, + 'instructions': + 'nested', + 'name': + 'nested', + 'type': + 'radio', + 'options': [{ + 'label': + 'radio_option_1', + 'value': + 'radio_value_1', + 'options': [{ + 'required': + False, + 'instructions': + 'nested_checkbox', + 'name': + 'nested_checkbox', + 'type': + 'checklist', + 'options': [{ + 'label': 'nested_checkbox_option_1', + 'value': 'nested_checkbox_value_1', + 
'options': [] + }, { + 'label': 'nested_checkbox_option_2', + 'value': 'nested_checkbox_value_2' + }] + }, { + 'required': False, + 'instructions': 'nested_text', + 'name': 'nested_text', + 'type': 'text', + 'options': [] + }] + },] + }] + } + + bbox_tool = { + 'required': + False, + 'name': + 'bbox', + 'tool': + 'rectangle', + 'color': + '#a23030', + 'classifications': [{ + 'required': + False, + 'instructions': + 'nested', + 'name': + 'nested', + 'type': + 'radio', + 'options': [{ + 'label': + 'radio_option_1', + 'value': + 'radio_value_1', + 'options': [{ + 'required': + False, + 'instructions': + 'nested_checkbox', + 'name': + 'nested_checkbox', + 'type': + 'checklist', + 'options': [{ + 'label': 'nested_checkbox_option_1', + 'value': 'nested_checkbox_value_1', + 'options': [] + }, { + 'label': 'nested_checkbox_option_2', + 'value': 'nested_checkbox_value_2' + }] + }] + },] + }] + } + + polygon_tool = { + 'required': False, + 'name': 'polygon', + 'tool': 'polygon', + 'color': '#FF34FF', + 'classifications': [] + } + polyline_tool = { + 'required': False, + 'name': 'polyline', + 'tool': 'line', + 'color': '#FF4A46', + 'classifications': [] + } + point_tool = { + 'required': False, + 'name': 'point--', + 'tool': 'point', + 'color': '#008941', + 'classifications': [] + } + entity_tool = { + 'required': False, + 'name': 'entity--', + 'tool': 'named-entity', + 'color': '#006FA6', + 'classifications': [] + } + segmentation_tool = { + 'required': False, + 'name': 'segmentation--', + 'tool': 'superpixel', + 'color': '#A30059', + 'classifications': [] + } + raster_segmentation_tool = { + 'required': False, + 'name': 'segmentation_mask', + 'tool': 'raster-segmentation', + 'color': '#ff0000', + 'classifications': [] + } + checklist = { + 'required': + False, + 'instructions': + 'checklist', + 'name': + 'checklist', + 'type': + 'checklist', + 'options': [{ + 'label': 'option1', + 'value': 'option1' + }, { + 'label': 'option2', + 'value': 'option2' + }, { + 'label': 'optionN', + 'value': 'optionn' + }] + } + checklist_index = { + 'required': + False, + 'instructions': + 'checklist_index', + 'name': + 'checklist_index', + 'type': + 'checklist', + 'scope': + 'index', + 'options': [{ + 'label': 'option1_index', + 'value': 'option1_index' + }, { + 'label': 'option2_index', + 'value': 'option2_index' + }, { + 'label': 'optionN_index', + 'value': 'optionn_index' + }] + } + free_form_text = { + 'required': False, + 'instructions': 'text', + 'name': 'text', + 'type': 'text', + 'options': [] + } + free_form_text_index = { + 'required': False, + 'instructions': 'text_index', + 'name': 'text_index', + 'type': 'text', + 'scope': 'index', + 'options': [] + } + radio = { + 'required': + False, + 'instructions': + 'radio', + 'name': + 'radio', + 'type': + 'radio', + 'options': [{ + 'label': 'first_radio_answer', + 'value': 'first_radio_answer', + 'options': [] + }, { + 'label': 'second_radio_answer', + 'value': 'second_radio_answer', + 'options': [] + }] + } + named_entity = { + 'tool': 'named-entity', + 'name': 'named-entity', + 'required': False, + 'color': '#A30059', + 'classifications': [], + } + + tools = [ + bbox_tool, + bbox_tool_with_nested_text, + polygon_tool, + polyline_tool, + point_tool, + entity_tool, + segmentation_tool, + raster_segmentation_tool, + named_entity, + ] + classifications = [ + checklist, checklist_index, free_form_text, free_form_text_index, radio + ] + return {"tools": tools, "classifications": classifications} + + +@pytest.fixture +def configured_project_without_data_rows(client, 
ontology, rand_gen):
+    project = client.create_project(name=rand_gen(str),
+                                    description=rand_gen(str),
+                                    queue_mode=QueueMode.Batch)
+    editor = list(
+        client.get_labeling_frontends(
+            where=LabelingFrontend.name == "editor"))[0]
+    project.setup(editor, ontology)
+    yield project
+    project.delete()
diff --git a/tests/integration/export_v2/test_data_rows.py b/tests/integration/export_v2/test_data_rows.py
new file mode 100644
index 000000000..edd79cfcf
--- /dev/null
+++ b/tests/integration/export_v2/test_data_rows.py
@@ -0,0 +1,24 @@
+import time
+from labelbox import DataRow
+from labelbox.schema.media_type import MediaType
+
+
+def test_export_data_rows(client, data_row, wait_for_data_row_processing):
+    # Ensure created data rows are indexed
+    data_row = wait_for_data_row_processing(client, data_row)
+    time.sleep(7)  # temp fix for ES indexing delay
+    params = {
+        "include_performance_details": True,
+        "include_labels": True,
+        "media_type_override": MediaType.Image,
+        "project_details": True,
+        "data_row_details": True
+    }
+    task = DataRow.export_v2(client=client, data_rows=[data_row], params=params)
+    task.wait_till_done()
+    assert task.status == "COMPLETE"
+    assert task.errors is None
+    assert len(task.result) == 1
+    assert task.result[0]["data_row"]["id"] == data_row.uid
+    assert task.result[0]["data_row"]["external_id"] == data_row.external_id
+    assert task.result[0]["data_row"]["row_data"] == data_row.row_data
diff --git a/tests/integration/export_v2/test_dataset.py b/tests/integration/export_v2/test_dataset.py
new file mode 100644
index 000000000..d965c3ff8
--- /dev/null
+++ b/tests/integration/export_v2/test_dataset.py
@@ -0,0 +1,31 @@
+import pytest
+
+
+@pytest.mark.parametrize('data_rows', [5], indirect=True)
+def test_dataset_export_v2(export_v2_test_helpers, dataset, data_rows):
+    data_row_ids = [dr.uid for dr in data_rows]
+    params = {"performance_details": False, "label_details": False}
+    task_results = export_v2_test_helpers.run_dataset_export_v2_task(
+        dataset, params=params)
+    assert len(task_results) == len(data_row_ids)
+    assert set([dr['data_row']['id'] for dr in task_results
+               ]) == set(data_row_ids)
+
+
+@pytest.mark.parametrize('data_rows', [5], indirect=True)
+def test_dataset_export_v2_datarow_list(export_v2_test_helpers, dataset,
+                                        data_rows):
+    datarow_filter_size = 2
+    data_row_ids = [dr.uid for dr in data_rows]
+
+    params = {"performance_details": False, "label_details": False}
+    filters = {"data_row_ids": data_row_ids[:datarow_filter_size]}
+
+    task_results = export_v2_test_helpers.run_dataset_export_v2_task(
+        dataset, filters=filters, params=params)
+
+    # only `datarow_filter_size` datarows should be exported
+    assert len(task_results) == datarow_filter_size
+    # only filtered datarows should be exported
+    assert set([dr['data_row']['id'] for dr in task_results
+               ]) == set(data_row_ids[:datarow_filter_size])
diff --git a/tests/integration/export_v2/test_exceptions.py b/tests/integration/export_v2/test_exceptions.py
new file mode 100644
index 000000000..c2ce2bb5f
--- /dev/null
+++ b/tests/integration/export_v2/test_exceptions.py
@@ -0,0 +1,34 @@
+import pytest
+
+from labelbox.schema.queue_mode import QueueMode
+import labelbox.exceptions
+
+
+@pytest.fixture
+def project(client, rand_gen):
+    # project with no media type
+    project = client.create_project(name=rand_gen(str),
+                                    queue_mode=QueueMode.Batch,
+                                    media_type=None)
+    yield project
+    project.delete()
+
+
+@pytest.skip
+def test_export_v2_without_media_type(client, export_v2_test_helpers,
+                                      wait_for_data_row_processing):
+    data_row = 
wait_for_data_row_processing(client, data_row) + project.media_type = None + + task_name = "test_label_export_v2_without_media_type" + params = { + "include_performance_details": True, + "include_labels": True, + "media_type_override": None, + "project_details": True, + "data_row_details": True + } + with pytest.raises(labelbox.exceptions.LabelboxError): + export_v2_test_helpers.run_project_export_v2_task(project, + task_name=task_name, + params=params) diff --git a/tests/integration/annotation_import/test_export_v2_video.py b/tests/integration/export_v2/test_export_v2_video.py similarity index 100% rename from tests/integration/annotation_import/test_export_v2_video.py rename to tests/integration/export_v2/test_export_v2_video.py diff --git a/tests/integration/test_export.py b/tests/integration/export_v2/test_legacy_export.py similarity index 62% rename from tests/integration/test_export.py rename to tests/integration/export_v2/test_legacy_export.py index c415c644e..51c54063a 100644 --- a/tests/integration/test_export.py +++ b/tests/integration/export_v2/test_legacy_export.py @@ -1,8 +1,14 @@ import uuid import datetime +import time +import requests +import pytest from labelbox.data.annotation_types.annotation import ObjectAnnotation from labelbox.schema.annotation_import import LabelImport +from labelbox import Dataset, Project + +IMAGE_URL = "https://storage.googleapis.com/diagnostics-demo-data/coco/COCO_train2014_000000000034.jpg" def test_export_annotations_nested_checklist( @@ -151,3 +157,84 @@ def test_export_filtered_activity(client, last_activity_end=(datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")) assert len(empty_export) == 0 + + +def test_export_data_rows(project: Project, dataset: Dataset): + n_data_rows = 2 + task = dataset.create_data_rows([ + { + "row_data": IMAGE_URL, + "external_id": "my-image" + }, + ] * n_data_rows) + task.wait_till_done() + + data_rows = [dr.uid for dr in list(dataset.export_data_rows())] + batch = project.create_batch("batch test", data_rows) + + result = list(batch.export_data_rows()) + exported_data_rows = [dr.uid for dr in result] + + assert len(result) == n_data_rows + assert set(data_rows) == set(exported_data_rows) + + +def test_queued_data_row_export(configured_project): + result = configured_project.export_queued_data_rows() + assert len(result) == 1 + + +def test_label_export(configured_project_with_label): + project, _, _, label = configured_project_with_label + label_id = label.uid + # Wait for exporter to retrieve latest labels + time.sleep(10) + + # TODO: Move to export_v2 + exported_labels_url = project.export_labels() + assert exported_labels_url is not None + exported_labels = requests.get(exported_labels_url) + labels = [example['ID'] for example in exported_labels.json()] + assert labels[0] == label_id + #TODO: Add test for bulk export back. 
+    # The new exporter doesn't work with the create_label mutation
+
+
+def test_issues_export(project):
+    exported_issues_url = project.export_issues()
+    assert exported_issues_url
+
+    exported_issues_url = project.export_issues("Open")
+    assert exported_issues_url
+    assert "?status=Open" in exported_issues_url
+
+    exported_issues_url = project.export_issues("Resolved")
+    assert exported_issues_url
+    assert "?status=Resolved" in exported_issues_url
+
+    invalidStatusValue = "Closed"
+    with pytest.raises(ValueError) as exc_info:
+        exported_issues_url = project.export_issues(invalidStatusValue)
+    assert "status must be in" in str(exc_info.value)
+    assert "Found %s" % (invalidStatusValue) in str(exc_info.value)
+
+
+def test_dataset_export(dataset, image_url):
+    n_data_rows = 5
+    ids = set()
+    for _ in range(n_data_rows):
+        ids.add(dataset.create_data_row(row_data=image_url).uid)
+    result = list(dataset.export_data_rows())
+    assert len(result) == n_data_rows
+    assert set(dr.uid for dr in result) == ids
+
+
+def test_data_row_export_with_empty_media_attributes(
+        client, configured_project_with_label, wait_for_data_row_processing):
+    project, _, data_row, _ = configured_project_with_label
+    data_row = wait_for_data_row_processing(client, data_row)
+    labels = list(project.label_generator())
+    assert len(
+        labels
+    ) == 1, "Label export job unexpectedly returned an empty result set"
+    assert labels[0].data.media_attributes == {}
diff --git a/tests/integration/export_v2/test_model_run.py b/tests/integration/export_v2/test_model_run.py
new file mode 100644
index 000000000..caaef2e7f
--- /dev/null
+++ b/tests/integration/export_v2/test_model_run.py
@@ -0,0 +1,50 @@
+import time
+
+
+def _model_run_export_v2_results(model_run, task_name, params, num_retries=5):
+    """Export model run results and retry if no results are returned."""
+    while (num_retries > 0):
+        task = model_run.export_v2(task_name, params=params)
+        assert task.name == task_name
+        task.wait_till_done()
+        assert task.status == "COMPLETE"
+        assert task.errors is None
+        task_results = task.result
+        if len(task_results) == 0:
+            num_retries -= 1
+            time.sleep(5)
+        else:
+            return task_results
+    return []
+
+
+def test_model_run_export_v2(model_run_with_data_rows, configured_project):
+    task_name = "test_task"
+    media_attributes = True
+    params = {"media_attributes": media_attributes, "predictions": True}
+    task_results = _model_run_export_v2_results(model_run_with_data_rows,
+                                                task_name, params)
+    label_ids = [label.uid for label in configured_project.labels()]
+    label_ids_set = set(label_ids)
+
+    assert len(task_results) == len(label_ids)
+
+    for task_result in task_results:
+        # Check export param handling
+        if media_attributes:
+            assert 'media_attributes' in task_result and task_result[
+                'media_attributes'] is not None
+        else:
+            assert 'media_attributes' not in task_result or task_result[
+                'media_attributes'] is None
+        model_run = task_result['experiments'][
+            model_run_with_data_rows.model_id]['runs'][
+                model_run_with_data_rows.uid]
+        task_label_ids_set = set(
+            map(lambda label: label['id'], model_run['labels']))
+        task_prediction_ids_set = set(
+            map(lambda prediction: prediction['id'], model_run['predictions']))
+        for label_id in task_label_ids_set:
+            assert label_id in label_ids_set
+        for prediction_id in task_prediction_ids_set:
+            assert prediction_id in label_ids_set
diff --git a/tests/integration/export_v2/test_project.py b/tests/integration/export_v2/test_project.py
new file mode 100644
index 000000000..f9d4bbb36
--- /dev/null
+++ 
b/tests/integration/export_v2/test_project.py @@ -0,0 +1,180 @@ +from datetime import datetime, timezone, timedelta + +import pytest + +from labelbox.schema.media_type import MediaType + +IMAGE_URL = "https://storage.googleapis.com/diagnostics-demo-data/coco/COCO_train2014_000000000034.jpg" + + +def test_project_export_v2(client, export_v2_test_helpers, + configured_project_with_label, + wait_for_data_row_processing): + project, dataset, data_row, label = configured_project_with_label + data_row = wait_for_data_row_processing(client, data_row) + label_id = label.uid + + task_name = "test_label_export_v2" + params = { + "include_performance_details": True, + "include_labels": True, + "media_type_override": MediaType.Image, + "project_details": True, + "data_row_details": True + } + + task_results = export_v2_test_helpers.run_project_export_v2_task( + project, task_name=task_name, params=params) + + for task_result in task_results: + task_media_attributes = task_result['media_attributes'] + task_project = task_result['projects'][project.uid] + task_project_label_ids_set = set( + map(lambda prediction: prediction['id'], task_project['labels'])) + task_project_details = task_project['project_details'] + task_data_row = task_result['data_row'] + task_data_row_details = task_data_row['details'] + + assert label_id in task_project_label_ids_set + # data row + assert task_data_row['id'] == data_row.uid + assert task_data_row['external_id'] == data_row.external_id + assert task_data_row['row_data'] == data_row.row_data + + # data row details + assert task_data_row_details['dataset_id'] == dataset.uid + assert task_data_row_details['dataset_name'] == dataset.name + + actual_time = datetime.fromisoformat( + task_data_row_details['created_at']) + expected_time = datetime.fromisoformat( + dataset.created_at.strftime("%Y-%m-%dT%H:%M:%S.%f")) + actual_time = actual_time.replace(tzinfo=timezone.utc) + expected_time = expected_time.replace(tzinfo=timezone.utc) + tolerance = timedelta(seconds=2) + assert abs(actual_time - expected_time) <= tolerance + + assert task_data_row_details['last_activity_at'] is not None + assert task_data_row_details['created_by'] is not None + + # media attributes + assert task_media_attributes['mime_type'] == data_row.media_attributes[ + 'mimeType'] + + # project name and details + assert task_project['name'] == project.name + batch = next(project.batches()) + assert task_project_details['batch_id'] == batch.uid + assert task_project_details['batch_name'] == batch.name + assert task_project_details['priority'] is not None + assert task_project_details[ + 'consensus_expected_label_count'] is not None + assert task_project_details['workflow_history'] is not None + + # label details + assert task_project['labels'][0]['id'] == label_id + + +def test_project_export_v2_date_filters(client, export_v2_test_helpers, + configured_project_with_label, + wait_for_data_row_processing): + project, _, data_row, label = configured_project_with_label + data_row = wait_for_data_row_processing(client, data_row) + label_id = label.uid + + task_name = "test_label_export_v2_date_filters" + + filters = { + "last_activity_at": ["2000-01-01 00:00:00", "2050-01-01 00:00:00"], + "label_created_at": ["2000-01-01 00:00:00", "2050-01-01 00:00:00"] + } + + # TODO: Right now we don't have a way to test this + include_performance_details = True + params = { + "include_performance_details": include_performance_details, + "include_labels": True, + "media_type_override": MediaType.Image + } + + task_results = 
export_v2_test_helpers.run_project_export_v2_task( + project, task_name=task_name, filters=filters, params=params) + + for task_result in task_results: + task_project = task_result['projects'][project.uid] + task_project_label_ids_set = set( + map(lambda prediction: prediction['id'], task_project['labels'])) + assert label_id in task_project_label_ids_set + + # TODO: Add back in when we have a way to test this + # if include_performance_details: + # assert 'include_performance_details' in task_result and task_result[ + # 'include_performance_details'] is not None + # else: + # assert 'include_performance_details' not in task_result or task_result[ + # 'include_performance_details'] is None + + filters = {"last_activity_at": [None, "2050-01-01 00:00:00"]} + export_v2_test_helpers.run_project_export_v2_task(project, filters=filters) + + filters = {"label_created_at": ["2000-01-01 00:00:00", None]} + export_v2_test_helpers.run_project_export_v2_task(project, filters=filters) + + +def test_project_export_v2_with_iso_date_filters(client, export_v2_test_helpers, + configured_project_with_label, + wait_for_data_row_processing): + project, _, data_row, label = configured_project_with_label + data_row = wait_for_data_row_processing(client, data_row) + label_id = label.uid + + task_name = "test_label_export_v2_with_iso_date_filters" + + filters = { + "last_activity_at": [ + "2000-01-01T00:00:00+0230", "2050-01-01T00:00:00+0230" + ], + "label_created_at": [ + "2000-01-01T00:00:00+0230", "2050-01-01T00:00:00+0230" + ] + } + task_results = export_v2_test_helpers.run_project_export_v2_task( + project, task_name=task_name, filters=filters) + assert label_id == task_results[0]['projects'][ + project.uid]['labels'][0]['id'] + + filters = {"last_activity_at": [None, "2050-01-01T00:00:00+0230"]} + task_results = export_v2_test_helpers.run_project_export_v2_task( + project, task_name=task_name, filters=filters) + assert label_id == task_results[0]['projects'][ + project.uid]['labels'][0]['id'] + + filters = {"label_created_at": ["2050-01-01T00:00:00+0230", None]} + task_results = export_v2_test_helpers.run_project_export_v2_task( + project, task_name=task_name, filters=filters) + assert len(task_results) == 0 + + +@pytest.mark.parametrize("data_rows", [3], indirect=True) +def test_project_export_v2_datarow_filter( + export_v2_test_helpers, + configured_batch_project_with_multiple_datarows): + project, _, data_rows = configured_batch_project_with_multiple_datarows + + data_row_ids = [dr.uid for dr in data_rows] + datarow_filter_size = 2 + + filters = { + "last_activity_at": ["2000-01-01 00:00:00", "2050-01-01 00:00:00"], + "label_created_at": ["2000-01-01 00:00:00", "2050-01-01 00:00:00"], + "data_row_ids": data_row_ids[:datarow_filter_size] + } + params = {"data_row_details": True, "media_type_override": MediaType.Image} + task_results = export_v2_test_helpers.run_project_export_v2_task( + project, filters=filters, params=params) + + # only 2 datarows should be exported + assert len(task_results) == datarow_filter_size + # only filtered datarows should be exported + assert set([dr['data_row']['id'] for dr in task_results + ]) == set(data_row_ids[:datarow_filter_size]) diff --git a/tests/integration/test_slice.py b/tests/integration/export_v2/test_slice.py similarity index 92% rename from tests/integration/test_slice.py rename to tests/integration/export_v2/test_slice.py index 9b1727393..2caa6b227 100644 --- a/tests/integration/test_slice.py +++ b/tests/integration/export_v2/test_slice.py @@ -5,7 +5,7 @@ 
     'Skipping until we have a way to create slices programmatically')
 def test_export_v2_slice(client):
     # Since we don't have CRUD for slices, we'll just use the one that's already there
-    SLICE_ID = "clfgqf1c72mk107zx6ypo9bse"
+    SLICE_ID = "clk04g1e4000ryb0rgsvy1dty"
     slice = client.get_catalog_slice(SLICE_ID)
     task = slice.export_v2(params={
         "performance_details": False,
diff --git a/tests/integration/test_data_row_media_attributes.py b/tests/integration/test_data_row_media_attributes.py
deleted file mode 100644
index 4e75513d4..000000000
--- a/tests/integration/test_data_row_media_attributes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-def test_export_empty_media_attributes(client, configured_project_with_label,
-                                       wait_for_data_row_processing):
-    project, _, data_row, _ = configured_project_with_label
-    data_row = wait_for_data_row_processing(client, data_row)
-    labels = list(project.label_generator())
-    assert len(
-        labels
-    ) == 1, "Label export job unexpectedly returned an empty result set`"
-    assert labels[0].data.media_attributes == {}
diff --git a/tests/integration/test_issues.py b/tests/integration/test_issues.py
deleted file mode 100644
index c23983cd0..000000000
--- a/tests/integration/test_issues.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pytest
-
-
-def test_issues_export(project):
-    exported_issues_url = project.export_issues()
-    assert exported_issues_url
-
-    exported_issues_url = project.export_issues("Open")
-    assert exported_issues_url
-    assert "?status=Open" in exported_issues_url
-
-    exported_issues_url = project.export_issues("Resolved")
-    assert exported_issues_url
-    assert "?status=Resolved" in exported_issues_url
-
-    invalidStatusValue = "Closed"
-    with pytest.raises(ValueError) as exc_info:
-        exported_issues_url = project.export_issues(invalidStatusValue)
-    assert "status must be in" in str(exc_info.value)
-    assert "Found %s" % (invalidStatusValue) in str(exc_info.value)
diff --git a/tests/integration/test_label.py b/tests/integration/test_label.py
index c6057cb4c..266b5f9a8 100644
--- a/tests/integration/test_label.py
+++ b/tests/integration/test_label.py
@@ -28,21 +28,6 @@ def test_labels(configured_project_with_label):
     assert list(data_row.labels()) == []
 
 
-def test_label_export(configured_project_with_label):
-    project, _, _, label = configured_project_with_label
-    label_id = label.uid
-    # Wait for exporter to retrieve latest labels
-    time.sleep(10)
-
-    exported_labels_url = project.export_labels()
-    assert exported_labels_url is not None
-    exported_labels = requests.get(exported_labels_url)
-    labels = [example['ID'] for example in exported_labels.json()]
-    assert labels[0] == label_id
-    #TODO: Add test for bulk export back. 
- # The new exporter doesn't work with the create_label mutation - - # TODO: Skipping this test in staging due to label not updating @pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem" or os.environ['LABELBOX_TEST_ENVIRON'] == "staging" or diff --git a/tests/integration/test_project.py b/tests/integration/test_project.py index 6b701c5eb..6c0f1be94 100644 --- a/tests/integration/test_project.py +++ b/tests/integration/test_project.py @@ -370,11 +370,6 @@ def test_same_ontology_after_instructions( assert instructions is not None -def test_queued_data_row_export(configured_project): - result = configured_project.export_queued_data_rows() - assert len(result) == 1 - - def test_queue_mode(configured_project: Project): assert configured_project.queue_mode == QueueMode.Batch @@ -387,6 +382,7 @@ def test_batches(project: Project, dataset: Dataset, image_url): }, ] * 2) task.wait_till_done() + # TODO: Move to export_v2 data_rows = [dr.uid for dr in list(dataset.export_data_rows())] batch_one = f'batch one {uuid.uuid4()}' batch_two = f'batch two {uuid.uuid4()}' @@ -404,6 +400,7 @@ def test_create_batch_with_global_keys_sync(project: Project, data_rows): batch = project.create_batch(batch_name, global_keys=global_keys) # allow time for catapult to sync changes to ES time.sleep(5) + # TODO: Move to export_v2 batch_data_rows = set(batch.export_data_rows()) assert batch_data_rows == set(data_rows) @@ -413,6 +410,7 @@ def test_create_batch_with_global_keys_async(project: Project, data_rows): global_keys = [dr.global_key for dr in data_rows] batch_name = f'batch {uuid.uuid4()}' batch = project._create_batch_async(batch_name, global_keys=global_keys) + # TODO: Move to export_v2 batch_data_rows = set(batch.export_data_rows()) assert batch_data_rows == set(data_rows) diff --git a/tests/integration/test_task.py b/tests/integration/test_task.py index a6c9d0b26..d591b16a9 100644 --- a/tests/integration/test_task.py +++ b/tests/integration/test_task.py @@ -66,6 +66,7 @@ def test_task_success_json(dataset, image_url, snapshot): def test_task_success_label_export(client, configured_project_with_label): project, _, _, _ = configured_project_with_label + # TODO: Move to export_v2 project.export_labels() user = client.get_user() task = None From d985579fb4ca9b36be4b19dac344f62bd09550fe Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Mon, 7 Aug 2023 15:03:27 -0700 Subject: [PATCH 2/8] deleted exceptions file --- .../integration/export_v2/test_exceptions.py | 34 ------------------- 1 file changed, 34 deletions(-) delete mode 100644 tests/integration/export_v2/test_exceptions.py diff --git a/tests/integration/export_v2/test_exceptions.py b/tests/integration/export_v2/test_exceptions.py deleted file mode 100644 index c2ce2bb5f..000000000 --- a/tests/integration/export_v2/test_exceptions.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest - -from labelbox.schema.queue_mode import QueueMode -import labelbox.exceptions - - -@pytest.fixture -def project(client, rand_gen): - # project with no media type - project = client.create_project(name=rand_gen(str), - queue_mode=QueueMode.Batch, - media_type=None) - yield project - project.delete() - - -@pytest.skip -def test_export_v2_without_media_type(client, export_v2_test_helpers, - wait_for_data_row_processing): - data_row = wait_for_data_row_processing(client, data_row) - project.media_type = None - - task_name = "test_label_export_v2_without_media_type" - params = { - "include_performance_details": True, - "include_labels": True, - 
"media_type_override": None, - "project_details": True, - "data_row_details": True - } - with pytest.raises(labelbox.exceptions.LabelboxError): - export_v2_test_helpers.run_project_export_v2_task(project, - task_name=task_name, - params=params) From 851479464804e9f899c5339f65952692b9293f2a Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Wed, 9 Aug 2023 15:36:27 -0700 Subject: [PATCH 3/8] fix fixture and merge from develop --- tests/integration/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 3e0651706..4690396de 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -262,7 +262,7 @@ def small_dataset(dataset: Dataset): @pytest.fixture -def data_row(dataset, image_url): +def data_row(dataset, image_url, rand_gen): task = dataset.create_data_rows([ { "row_data": image_url, From f222bd9a83f6b4ab8b68745e0a555805c3ec0a6d Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Thu, 10 Aug 2023 09:46:32 -0700 Subject: [PATCH 4/8] rename files --- .../export_v2/{test_slice.py => test__export_slice.py} | 0 .../export_v2/{test_data_rows.py => test_export_data_rows.py} | 0 .../export_v2/{test_dataset.py => test_export_dataset.py} | 0 .../export_v2/{test_model_run.py => test_export_model_run.py} | 0 .../export_v2/{test_project.py => test_export_project.py} | 0 .../export_v2/{test_export_v2_video.py => test_export_video.py} | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename tests/integration/export_v2/{test_slice.py => test__export_slice.py} (100%) rename tests/integration/export_v2/{test_data_rows.py => test_export_data_rows.py} (100%) rename tests/integration/export_v2/{test_dataset.py => test_export_dataset.py} (100%) rename tests/integration/export_v2/{test_model_run.py => test_export_model_run.py} (100%) rename tests/integration/export_v2/{test_project.py => test_export_project.py} (100%) rename tests/integration/export_v2/{test_export_v2_video.py => test_export_video.py} (100%) diff --git a/tests/integration/export_v2/test_slice.py b/tests/integration/export_v2/test__export_slice.py similarity index 100% rename from tests/integration/export_v2/test_slice.py rename to tests/integration/export_v2/test__export_slice.py diff --git a/tests/integration/export_v2/test_data_rows.py b/tests/integration/export_v2/test_export_data_rows.py similarity index 100% rename from tests/integration/export_v2/test_data_rows.py rename to tests/integration/export_v2/test_export_data_rows.py diff --git a/tests/integration/export_v2/test_dataset.py b/tests/integration/export_v2/test_export_dataset.py similarity index 100% rename from tests/integration/export_v2/test_dataset.py rename to tests/integration/export_v2/test_export_dataset.py diff --git a/tests/integration/export_v2/test_model_run.py b/tests/integration/export_v2/test_export_model_run.py similarity index 100% rename from tests/integration/export_v2/test_model_run.py rename to tests/integration/export_v2/test_export_model_run.py diff --git a/tests/integration/export_v2/test_project.py b/tests/integration/export_v2/test_export_project.py similarity index 100% rename from tests/integration/export_v2/test_project.py rename to tests/integration/export_v2/test_export_project.py diff --git a/tests/integration/export_v2/test_export_v2_video.py b/tests/integration/export_v2/test_export_video.py similarity index 100% rename from tests/integration/export_v2/test_export_v2_video.py rename to 
tests/integration/export_v2/test_export_video.py From 2ea2a900ba7d65da7e1dad40168139a6a7037521 Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Thu, 10 Aug 2023 13:36:39 -0700 Subject: [PATCH 5/8] fixtures --- tests/integration/export_v2/conftest.py | 245 ++++++++++++++++++++++++ 1 file changed, 245 insertions(+) diff --git a/tests/integration/export_v2/conftest.py b/tests/integration/export_v2/conftest.py index b0edf4512..9c81cf232 100644 --- a/tests/integration/export_v2/conftest.py +++ b/tests/integration/export_v2/conftest.py @@ -1,7 +1,10 @@ +import uuid +import time import pytest from labelbox.schema.queue_mode import QueueMode from labelbox.schema.media_type import MediaType from labelbox.schema.labeling_frontend import LabelingFrontend +from labelbox.schema.annotation_import import LabelImport, AnnotationImportState @pytest.fixture @@ -245,6 +248,53 @@ def ontology(): return {"tools": tools, "classifications": classifications} +@pytest.fixture +def polygon_inference(prediction_id_mapping): + polygon = prediction_id_mapping['polygon'].copy() + polygon.update({ + "polygon": [{ + "x": 147.692, + "y": 118.154 + }, { + "x": 142.769, + "y": 104.923 + }, { + "x": 57.846, + "y": 118.769 + }, { + "x": 28.308, + "y": 169.846 + }] + }) + del polygon['tool'] + return polygon + + +@pytest.fixture +def configured_project(client, initial_dataset, ontology, rand_gen, image_url): + dataset = initial_dataset + project = client.create_project( + name=rand_gen(str), + queue_mode=QueueMode.Batch, + ) + editor = list( + client.get_labeling_frontends( + where=LabelingFrontend.name == "editor"))[0] + project.setup(editor, ontology) + data_row_ids = [] + + for _ in range(len(ontology['tools']) + len(ontology['classifications'])): + data_row_ids.append(dataset.create_data_row(row_data=image_url).uid) + project.create_batch( + rand_gen(str), + data_row_ids, # sample of data row objects + 5 # priority between 1(Highest) - 5(lowest) + ) + project.data_row_ids = data_row_ids + yield project + project.delete() + + @pytest.fixture def configured_project_without_data_rows(client, ontology, rand_gen): project = client.create_project(name=rand_gen(str), @@ -256,3 +306,198 @@ def configured_project_without_data_rows(client, ontology, rand_gen): project.setup(editor, ontology) yield project project.delete() + + +@pytest.fixture +def model_run_with_data_rows(client, configured_project, model_run_predictions, + model_run, wait_for_label_processing): + configured_project.enable_model_assisted_labeling() + + upload_task = LabelImport.create_from_objects( + client, configured_project.uid, f"label-import-{uuid.uuid4()}", + model_run_predictions) + upload_task.wait_until_done() + assert upload_task.state == AnnotationImportState.FINISHED, "Label Import did not finish" + assert len( + upload_task.errors + ) == 0, f"Label Import {upload_task.name} failed with errors {upload_task.errors}" + labels = wait_for_label_processing(configured_project) + label_ids = [label.uid for label in labels] + model_run.upsert_labels(label_ids) + yield model_run + model_run.delete() + # TODO: Delete resources when that is possible .. 
+ + +@pytest.fixture +def model_run_predictions(polygon_inference, rectangle_inference, + line_inference): + # Not supporting mask since there isn't a signed url representing a seg mask to upload + return [polygon_inference, rectangle_inference, line_inference] + + +@pytest.fixture +def model(client, rand_gen, configured_project): + ontology = configured_project.ontology() + data = {"name": rand_gen(str), "ontology_id": ontology.uid} + model = client.create_model(data["name"], data["ontology_id"]) + yield model + try: + model.delete() + except: + # Already was deleted by the test + pass + + +@pytest.fixture +def model_run(rand_gen, model): + name = rand_gen(str) + model_run = model.create_model_run(name) + yield model_run + try: + model_run.delete() + except: + # Already was deleted by the test + pass + + +@pytest.fixture +def wait_for_label_processing(): + """ + Do not use. Only for testing. + + Returns project's labels as a list after waiting for them to finish processing. + If `project.labels()` is called before label is fully processed, + it may return an empty set + """ + + def func(project): + timeout_seconds = 10 + while True: + labels = list(project.labels()) + if len(labels) > 0: + return labels + timeout_seconds -= 2 + if timeout_seconds <= 0: + raise TimeoutError( + f"Timed out waiting for label for project '{project.uid}' to finish processing" + ) + time.sleep(2) + + return func + + +@pytest.fixture +def prediction_id_mapping(configured_project): + # Maps tool types to feature schema ids + project = configured_project + ontology = project.ontology().normalized + result = {} + + for idx, tool in enumerate(ontology['tools'] + ontology['classifications']): + if 'tool' in tool: + tool_type = tool['tool'] + else: + tool_type = tool[ + 'type'] if 'scope' not in tool else f"{tool['type']}_{tool['scope']}" # so 'checklist' of 'checklist_index' + + # TODO: remove this once we have a better way to associate multiple tools instances with a single tool type + if tool_type == 'rectangle': + value = { + "uuid": str(uuid.uuid4()), + "schemaId": tool['featureSchemaId'], + "name": tool['name'], + "dataRow": { + "id": project.data_row_ids[idx], + }, + 'tool': tool + } + if tool_type not in result: + result[tool_type] = [] + result[tool_type].append(value) + else: + result[tool_type] = { + "uuid": str(uuid.uuid4()), + "schemaId": tool['featureSchemaId'], + "name": tool['name'], + "dataRow": { + "id": project.data_row_ids[idx], + }, + 'tool': tool + } + return result + + +@pytest.fixture +def line_inference(prediction_id_mapping): + line = prediction_id_mapping['line'].copy() + line.update( + {"line": [{ + "x": 147.692, + "y": 118.154 + }, { + "x": 150.692, + "y": 160.154 + }]}) + del line['tool'] + return line + + +@pytest.fixture +def polygon_inference(prediction_id_mapping): + polygon = prediction_id_mapping['polygon'].copy() + polygon.update({ + "polygon": [{ + "x": 147.692, + "y": 118.154 + }, { + "x": 142.769, + "y": 104.923 + }, { + "x": 57.846, + "y": 118.769 + }, { + "x": 28.308, + "y": 169.846 + }] + }) + del polygon['tool'] + return polygon + + +def find_tool_by_name(tool_instances, name): + for tool in tool_instances: + if tool['name'] == name: + return tool + return None + + +@pytest.fixture +def rectangle_inference(prediction_id_mapping): + tool_instance = find_tool_by_name(prediction_id_mapping['rectangle'], + 'bbox') + rectangle = tool_instance.copy() + rectangle.update({ + "bbox": { + "top": 48, + "left": 58, + "height": 65, + "width": 12 + }, + 'classifications': [{ + "schemaId": 
+ rectangle['tool']['classifications'][0]['featureSchemaId'], + "name": + rectangle['tool']['classifications'][0]['name'], + "answer": { + "schemaId": + rectangle['tool']['classifications'][0]['options'][0] + ['featureSchemaId'], + "name": + rectangle['tool']['classifications'][0]['options'][0] + ['value'] + } + }] + }) + del rectangle['tool'] + return rectangle From adcd67cde06d0ca4fffbba11c3484b28d9c863ad Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Thu, 10 Aug 2023 14:12:23 -0700 Subject: [PATCH 6/8] fix tests --- tests/integration/export_v2/conftest.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/integration/export_v2/conftest.py b/tests/integration/export_v2/conftest.py index 9c81cf232..af8b4c66f 100644 --- a/tests/integration/export_v2/conftest.py +++ b/tests/integration/export_v2/conftest.py @@ -271,7 +271,8 @@ def polygon_inference(prediction_id_mapping): @pytest.fixture -def configured_project(client, initial_dataset, ontology, rand_gen, image_url): +def configured_project_with_ontology(client, initial_dataset, ontology, + rand_gen, image_url): dataset = initial_dataset project = client.create_project( name=rand_gen(str), @@ -309,19 +310,20 @@ def configured_project_without_data_rows(client, ontology, rand_gen): @pytest.fixture -def model_run_with_data_rows(client, configured_project, model_run_predictions, - model_run, wait_for_label_processing): - configured_project.enable_model_assisted_labeling() +def model_run_with_data_rows(client, configured_project_with_ontology, + model_run_predictions, model_run, + wait_for_label_processing): + configured_project_with_ontology.enable_model_assisted_labeling() upload_task = LabelImport.create_from_objects( - client, configured_project.uid, f"label-import-{uuid.uuid4()}", - model_run_predictions) + client, configured_project_with_ontology.uid, + f"label-import-{uuid.uuid4()}", model_run_predictions) upload_task.wait_until_done() assert upload_task.state == AnnotationImportState.FINISHED, "Label Import did not finish" assert len( upload_task.errors ) == 0, f"Label Import {upload_task.name} failed with errors {upload_task.errors}" - labels = wait_for_label_processing(configured_project) + labels = wait_for_label_processing(configured_project_with_ontology) label_ids = [label.uid for label in labels] model_run.upsert_labels(label_ids) yield model_run @@ -388,9 +390,9 @@ def func(project): @pytest.fixture -def prediction_id_mapping(configured_project): +def prediction_id_mapping(configured_project_with_ontology): # Maps tool types to feature schema ids - project = configured_project + project = configured_project_with_ontology ontology = project.ontology().normalized result = {} From 15392283f29fb4fe01b8d1ae945a37c060605841 Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Thu, 10 Aug 2023 16:06:28 -0700 Subject: [PATCH 7/8] address PR comments --- tests/integration/conftest.py | 4 ++-- tests/integration/export_v2/test_export_dataset.py | 4 ++-- tests/integration/export_v2/test_export_project.py | 2 +- tests/integration/test_batch.py | 13 ------------- 4 files changed, 5 insertions(+), 18 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 4690396de..75474ab4d 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -22,7 +22,7 @@ from labelbox.schema.user import User IMG_URL = "https://picsum.photos/200/300.jpg" -SMALL_DATASET_URL = 
"https://storage.googleapis.com/diagnostics-demo-data/coco/COCO_train2014_000000000034.jpg" +SMALL_DATASET_URL = "https://storage.googleapis.com/lb-artifacts-testing-public/sdk_integration_test/potato.jpeg" DATA_ROW_PROCESSING_WAIT_TIMEOUT_SECONDS = 30 DATA_ROW_PROCESSING_WAIT_SLEEP_INTERNAL_SECONDS = 5 @@ -255,7 +255,7 @@ def small_dataset(dataset: Dataset): "row_data": SMALL_DATASET_URL, "external_id": "my-image" }, - ] * 3) + ] * 2) task.wait_till_done() yield dataset diff --git a/tests/integration/export_v2/test_export_dataset.py b/tests/integration/export_v2/test_export_dataset.py index d965c3ff8..4a341f74c 100644 --- a/tests/integration/export_v2/test_export_dataset.py +++ b/tests/integration/export_v2/test_export_dataset.py @@ -1,7 +1,7 @@ import pytest -@pytest.mark.parametrize('data_rows', [5], indirect=True) +@pytest.mark.parametrize('data_rows', [2], indirect=True) def test_dataset_export_v2(export_v2_test_helpers, dataset, data_rows): data_row_ids = [dr.uid for dr in data_rows] params = {"performance_details": False, "label_details": False} @@ -12,7 +12,7 @@ def test_dataset_export_v2(export_v2_test_helpers, dataset, data_rows): ]) == set(data_row_ids) -@pytest.mark.parametrize('data_rows', [5], indirect=True) +@pytest.mark.parametrize('data_rows', [3], indirect=True) def test_dataset_export_v2_datarow_list(export_v2_test_helpers, dataset, data_rows): datarow_filter_size = 2 diff --git a/tests/integration/export_v2/test_export_project.py b/tests/integration/export_v2/test_export_project.py index f9d4bbb36..42dd40d01 100644 --- a/tests/integration/export_v2/test_export_project.py +++ b/tests/integration/export_v2/test_export_project.py @@ -4,7 +4,7 @@ from labelbox.schema.media_type import MediaType -IMAGE_URL = "https://storage.googleapis.com/diagnostics-demo-data/coco/COCO_train2014_000000000034.jpg" +IMAGE_URL = "https://storage.googleapis.com/lb-artifacts-testing-public/sdk_integration_test/potato.jpeg" def test_project_export_v2(client, export_v2_test_helpers, diff --git a/tests/integration/test_batch.py b/tests/integration/test_batch.py index 175917977..485bf308c 100644 --- a/tests/integration/test_batch.py +++ b/tests/integration/test_batch.py @@ -26,19 +26,6 @@ def big_dataset(dataset: Dataset): yield dataset -@pytest.fixture -def small_dataset(dataset: Dataset): - task = dataset.create_data_rows([ - { - "row_data": IMAGE_URL, - "external_id": EXTERNAL_ID - }, - ] * 2) - task.wait_till_done() - - yield dataset - - @pytest.fixture(scope='function') def dataset_with_invalid_data_rows(unique_dataset: Dataset): upload_invalid_data_rows_for_dataset(unique_dataset) From 983c15fd08cd084928c02f71b51fdfd653887223 Mon Sep 17 00:00:00 2001 From: Yamini Kancharana Date: Thu, 10 Aug 2023 16:20:44 -0700 Subject: [PATCH 8/8] fix url --- tests/integration/export_v2/test_legacy_export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/export_v2/test_legacy_export.py b/tests/integration/export_v2/test_legacy_export.py index 51c54063a..763672e0f 100644 --- a/tests/integration/export_v2/test_legacy_export.py +++ b/tests/integration/export_v2/test_legacy_export.py @@ -8,7 +8,7 @@ from labelbox.schema.annotation_import import LabelImport from labelbox import Dataset, Project -IMAGE_URL = "https://storage.googleapis.com/diagnostics-demo-data/coco/COCO_train2014_000000000034.jpg" +IMAGE_URL = "https://storage.googleapis.com/lb-artifacts-testing-public/sdk_integration_test/potato.jpeg" def test_export_annotations_nested_checklist(