2 changes: 1 addition & 1 deletion labelbox/schema/export_params.py
@@ -16,7 +16,7 @@ class DataRowParams(TypedDict):

class ProjectExportParams(DataRowParams):
    project_details: Optional[bool]
-    labels: Optional[bool]
+    label_details: Optional[bool]
    performance_details: Optional[bool]


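For context, a minimal sketch (not part of this change) of building export parameters with the renamed field as a ProjectExportParams dict; the particular values chosen here are illustrative assumptions, while the keys themselves come from the diff:

from labelbox.schema.export_params import ProjectExportParams

# Hypothetical export parameters using the renamed key; a TypedDict is not
# validated at runtime, so optional keys that are not needed can be omitted.
export_params: ProjectExportParams = {
    "data_row_details": True,
    "project_details": True,
    "label_details": True,  # formerly "labels"
    "performance_details": False,
}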
4 changes: 2 additions & 2 deletions labelbox/schema/model_run.py
@@ -487,10 +487,10 @@ def export_v2(self,
                    # Arguments locked based on execution context
                    "includeProjectDetails":
                        False,
-                    "includeLabels":
-                        False,
                    "includePerformanceDetails":
                        False,
+                    "includeLabelDetails":
+                        False
                },
            }
        }
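For reference, a minimal sketch (not part of this change) of driving a model run export through export_v2. The task_name argument and the Task.wait_till_done()/Task.result attributes are assumptions about the surrounding SDK rather than something shown in this diff; project details, label details, and performance details are not requested because they are locked to False in this execution context.

# model_run is assumed to be an existing labelbox ModelRun instance.
export_task = model_run.export_v2(task_name="model-run-export")
export_task.wait_till_done()        # assumed Task API
exported_rows = export_task.result  # exported data rows for the model run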
8 changes: 4 additions & 4 deletions labelbox/schema/project.py
@@ -406,8 +406,8 @@ def export_v2(self,
"metadata_fields": False,
"data_row_details": False,
"project_details": False,
"labels": False,
"performance_details": False
"performance_details": False,
"label_details": False
})

mutation_name = "exportDataRowsInProject"
@@ -431,10 +431,10 @@ def export_v2(self,
                        _params.get('data_row_details', False),
                    "includeProjectDetails":
                        _params.get('project_details', False),
-                    "includeLabels":
-                        _params.get('labels', False),
                    "includePerformanceDetails":
                        _params.get('performance_details', False),
+                    "includeLabelDetails":
+                        _params.get('label_details', False)
                },
            }
        }
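For reference, a minimal sketch (not part of this change) of requesting label details in a project export with the renamed parameter. The params keys mirror the defaults shown above; the task_name argument and the Task API are assumptions about the surrounding SDK.

# project is assumed to be an existing labelbox Project instance.
export_task = project.export_v2(
    task_name="project-export",
    params={
        "data_row_details": True,
        "project_details": True,
        "label_details": True,  # was "labels" before this change
        "performance_details": True,
    })
export_task.wait_till_done()  # assumed Task API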
91 changes: 45 additions & 46 deletions tests/integration/annotation_import/test_model_run.py
@@ -117,7 +117,51 @@ def test_model_run_export_labels(model_run_with_model_run_data_rows):
    assert len(labels) == 3


-@pytest.mark.skip(reason="feature under development")
+@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
+                    reason="does not work for onprem")
+def test_model_run_status(model_run_with_model_run_data_rows):
+
+    def get_model_run_status():
+        return model_run_with_model_run_data_rows.client.execute(
+            """query trainingPipelinePyApi($modelRunId: ID!) {
+            trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
+            """, {'modelRunId': model_run_with_model_run_data_rows.uid},
+            experimental=True)['trainingPipeline']
+
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] is None
+    assert model_run_status['metadata'] is None
+    assert model_run_status['errorMessage'] is None
+
+    status = "COMPLETE"
+    metadata = {'key1': 'value1'}
+    errorMessage = "an error"
+    model_run_with_model_run_data_rows.update_status(status, metadata,
+                                                     errorMessage)
+
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status
+    assert model_run_status['metadata'] == metadata
+    assert model_run_status['errorMessage'] == errorMessage
+
+    extra_metadata = {'key2': 'value2'}
+    model_run_with_model_run_data_rows.update_status(status, extra_metadata)
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status
+    assert model_run_status['metadata'] == {**metadata, **extra_metadata}
+    assert model_run_status['errorMessage'] == errorMessage
+
+    status = ModelRun.Status.FAILED
+    model_run_with_model_run_data_rows.update_status(status, metadata,
+                                                     errorMessage)
+    model_run_status = get_model_run_status()
+    assert model_run_status['status'] == status.value
+
+    with pytest.raises(ValueError):
+        model_run_with_model_run_data_rows.update_status(
+            "INVALID", metadata, errorMessage)
+
+
def test_model_run_export_v2(model_run_with_model_run_data_rows,
                             configured_project):
    task_name = "test_task"
@@ -164,51 +208,6 @@ def download_result(result_url):
assert prediction_id in label_ids_set


-@pytest.mark.skipif(condition=os.environ['LABELBOX_TEST_ENVIRON'] == "onprem",
-                    reason="does not work for onprem")
-def test_model_run_status(model_run_with_model_run_data_rows):
-
-    def get_model_run_status():
-        return model_run_with_model_run_data_rows.client.execute(
-            """query trainingPipelinePyApi($modelRunId: ID!) {
-            trainingPipeline(where: {id : $modelRunId}) {status, errorMessage, metadata}}
-            """, {'modelRunId': model_run_with_model_run_data_rows.uid},
-            experimental=True)['trainingPipeline']
-
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] is None
-    assert model_run_status['metadata'] is None
-    assert model_run_status['errorMessage'] is None
-
-    status = "COMPLETE"
-    metadata = {'key1': 'value1'}
-    errorMessage = "an error"
-    model_run_with_model_run_data_rows.update_status(status, metadata,
-                                                     errorMessage)
-
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status
-    assert model_run_status['metadata'] == metadata
-    assert model_run_status['errorMessage'] == errorMessage
-
-    extra_metadata = {'key2': 'value2'}
-    model_run_with_model_run_data_rows.update_status(status, extra_metadata)
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status
-    assert model_run_status['metadata'] == {**metadata, **extra_metadata}
-    assert model_run_status['errorMessage'] == errorMessage
-
-    status = ModelRun.Status.FAILED
-    model_run_with_model_run_data_rows.update_status(status, metadata,
-                                                     errorMessage)
-    model_run_status = get_model_run_status()
-    assert model_run_status['status'] == status.value
-
-    with pytest.raises(ValueError):
-        model_run_with_model_run_data_rows.update_status(
-            "INVALID", metadata, errorMessage)
-
-
def test_model_run_split_assignment(model_run, dataset, image_url):
    n_data_rows = 10
    data_rows = dataset.create_data_rows([{
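For reference, a minimal sketch (not part of this change) of the update_status behaviour exercised by test_model_run_status above: metadata from successive calls is merged, the status may be a plain string or a ModelRun.Status member, and an unrecognised status raises ValueError. The model_run variable here is an assumed, pre-existing ModelRun instance.

from labelbox.schema.model_run import ModelRun

model_run.update_status("COMPLETE", {"key1": "value1"}, "an error")
model_run.update_status(ModelRun.Status.FAILED, {"key2": "value2"})  # metadata now holds key1 and key2

try:
    model_run.update_status("INVALID", {}, "oops")
except ValueError:
    pass  # statuses outside ModelRun.Status are rejected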
1 change: 0 additions & 1 deletion tests/integration/test_project.py
@@ -42,7 +42,6 @@ def test_project(client, rand_gen):
    assert project not in projects


-@pytest.mark.skip(reason="feature under development")
def test_project_export_v2(configured_project_with_label):
    project, _, _, label = configured_project_with_label
    label_id = label.uid