@@ -80,6 +80,7 @@ def test_create_from_objects_all_project_labels(
 def test_model_run_project_labels(model_run_with_all_project_labels,
                                   model_run_predictions):
     model_run = model_run_with_all_project_labels
+    # TODO: Move to export_v2
     model_run_exported_labels = model_run.export_labels(download=True)
     labels_indexed_by_schema_id = {}
     for label in model_run_exported_labels:
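The TODO added above points at the task-based `export_v2` path. A minimal sketch of what the migrated setup might look like, reusing only the `export_v2` calls exercised by the helper removed from test_model_run.py below (`export_v2(task_name, params=...)`, `wait_till_done()`, `task.status`, `task.errors`, `task.result`); the test name and `params` here are illustrative assumptions:

```python
# Sketch only: mirrors the export_v2 usage from the deleted
# _model_run_export_v2_results helper; the test name is hypothetical.
def test_model_run_project_labels_export_v2(model_run_with_all_project_labels):
    model_run = model_run_with_all_project_labels
    task = model_run.export_v2("project-labels-export",
                               params={"predictions": True})
    task.wait_till_done()
    assert task.status == "COMPLETE"
    assert task.errors is None
    exported_labels = task.result  # one entry per exported data row
    assert len(exported_labels) > 0
```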
49 changes: 0 additions & 49 deletions tests/integration/annotation_import/test_model_run.py
@@ -7,23 +7,6 @@
 from labelbox import DataSplit, ModelRun
 
 
-def _model_run_export_v2_results(model_run, task_name, params, num_retries=5):
-    """Export model run results and retry if no results are returned."""
-    while (num_retries > 0):
-        task = model_run.export_v2(task_name, params=params)
-        assert task.name == task_name
-        task.wait_till_done()
-        assert task.status == "COMPLETE"
-        assert task.errors is None
-        task_results = task.result
-        if len(task_results) == 0:
-            num_retries -= 1
-            time.sleep(5)
-        else:
-            return task_results
-    return []
-
-
 def test_model_run(client, configured_project_with_label, data_row, rand_gen):
     project, _, _, label = configured_project_with_label
     label_id = label.uid
@@ -182,38 +165,6 @@ def get_model_run_status():
                                            errorMessage)
 
 
-def test_model_run_export_v2(model_run_with_data_rows, configured_project):
-    task_name = "test_task"
-    media_attributes = True
-    params = {"media_attributes": media_attributes, "predictions": True}
-    task_results = _model_run_export_v2_results(model_run_with_data_rows,
-                                                task_name, params)
-    label_ids = [label.uid for label in configured_project.labels()]
-    label_ids_set = set(label_ids)
-
-    assert len(task_results) == len(label_ids)
-
-    for task_result in task_results:
-        # Check export param handling
-        if media_attributes:
-            assert 'media_attributes' in task_result and task_result[
-                'media_attributes'] is not None
-        else:
-            assert 'media_attributes' not in task_result or task_result[
-                'media_attributes'] is None
-        model_run = task_result['experiments'][
-            model_run_with_data_rows.model_id]['runs'][
-                model_run_with_data_rows.uid]
-        task_label_ids_set = set(
-            map(lambda label: label['id'], model_run['labels']))
-        task_prediction_ids_set = set(
-            map(lambda prediction: prediction['id'], model_run['predictions']))
-        for label_id in task_label_ids_set:
-            assert label_id in label_ids_set
-        for prediction_id in task_prediction_ids_set:
-            assert prediction_id in label_ids_set
-
-
 def test_model_run_split_assignment_by_data_row_ids(model_run, dataset,
                                                     image_url):
     n_data_rows = 10
16 changes: 15 additions & 1 deletion tests/integration/conftest.py
@@ -11,7 +11,7 @@
 import requests
 
 from labelbox import Client, MediaType
-from labelbox import LabelingFrontend
+from labelbox import LabelingFrontend, Dataset
 from labelbox import OntologyBuilder, Tool, Option, Classification, MediaType
 from labelbox.orm import query
 from labelbox.pagination import PaginatedCollection
@@ -22,6 +22,7 @@
 from labelbox.schema.user import User
 
 IMG_URL = "https://picsum.photos/200/300.jpg"
+SMALL_DATASET_URL = "https://storage.googleapis.com/lb-artifacts-testing-public/sdk_integration_test/potato.jpeg"
 DATA_ROW_PROCESSING_WAIT_TIMEOUT_SECONDS = 30
 DATA_ROW_PROCESSING_WAIT_SLEEP_INTERNAL_SECONDS = 5
 
@@ -247,6 +248,19 @@ def unique_dataset(client, rand_gen):
     dataset.delete()
 
 
+@pytest.fixture
+def small_dataset(dataset: Dataset):
+    task = dataset.create_data_rows([
+        {
+            "row_data": SMALL_DATASET_URL,
+            "external_id": "my-image"
+        },
+    ] * 2)
+    task.wait_till_done()
+
+    yield dataset
+
+
 @pytest.fixture
 def data_row(dataset, image_url, rand_gen):
     task = dataset.create_data_rows([
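For reference, a hypothetical consumer of the new `small_dataset` fixture; the test name is illustrative, and `Dataset.data_rows()` is assumed as the SDK's paginated row accessor:

```python
# Sketch only: the fixture uploads the same SMALL_DATASET_URL image twice,
# so a consuming test should see exactly two rows sharing the external_id.
def test_small_dataset_row_count(small_dataset):
    rows = list(small_dataset.data_rows())
    assert len(rows) == 2
    assert all(row.external_id == "my-image" for row in rows)
```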