diff --git a/CHANGELOG.md b/CHANGELOG.md index f2427ffb2..94c06d256 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +# Version 3.61.2 (2024-01-29) +## Added +* `ModelSlice.get_data_row_identifiers` for Foundry data rows + +## Fixed +* `ModelSlice.get_data_row_identifiers` scoping by model run id + # Version 3.61.1 (2024-01-25) ## Fixed * Removed export API limit (5000) diff --git a/Dockerfile b/Dockerfile index 1b4b38f80..d9c33f9a5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM python:3.9-slim-bullseye -RUN pip install pytest pytest-cases pytest-rerunfailures pytest-snapshot +RUN pip install pytest=="7.4.4" pytest-cases pytest-rerunfailures pytest-snapshot RUN apt-get -y update RUN apt install -y libsm6 \ libxext6 \ diff --git a/docs/requirements.txt b/docs/requirements.txt index c46338f90..5926fb23d 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,2 @@ -Sphinx==4.5.0 +Sphinx~=5.3.0 sphinx-rtd-theme==0.5.1 diff --git a/docs/source/conf.py b/docs/source/conf.py index 192bedc2e..0cd5dc8d8 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -21,7 +21,7 @@ copyright = '2021, Labelbox' author = 'Labelbox' -release = '3.61.1' +release = '3.61.2' # -- General configuration --------------------------------------------------- diff --git a/examples/basics/basics.ipynb b/examples/basics/basics.ipynb index 5ddd46b1c..41e61898a 100644 --- a/examples/basics/basics.ipynb +++ b/examples/basics/basics.ipynb @@ -100,19 +100,7 @@ "print(\"Dataset Name:\" , dataset_name)" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Project ID: cl9smiqo23hk307y27k42cajv\n", - "Project Name: html-editor\n", - "----------------------------------------\n", - "Dataset ID: cl9sywtkj2gsv07vk2isaeadj\n", - "Dataset Name: text_test.json\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -133,16 +121,7 @@ "print(\"Dataset: \", dataset)" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Project: , 'name': 'html-editor', 'queue_mode': , 'setup_complete': None, 'uid': 'cl9smiqo23hk307y27k42cajv', 'updated_at': datetime.datetime(2022, 10, 28, 15, 47, 41, tzinfo=datetime.timezone.utc)}>\n", - "Dataset: \n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -161,16 +140,7 @@ "print(dataset.name)" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "html-editor\n", - "text_test.json\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -187,15 +157,7 @@ "print(project.description)" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "new description field\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -218,16 +180,7 @@ "print(\"Number of labels :\", len(list(labels_paginated_collection)))" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Type of collection: \n", - "Number of labels : 0\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -242,15 +195,7 @@ " print(\"Project has no labels !\")" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Project has no labels !\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -280,18 +225,7 @@ "# We can see there is only one." 
], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - ", 'name': 'html-editor', 'queue_mode': , 'setup_complete': None, 'uid': 'cl9smiqo23hk307y27k42cajv', 'updated_at': datetime.datetime(2022, 11, 1, 19, 18, 21, tzinfo=datetime.timezone.utc)}>\n", - "None\n", - "None\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -324,16 +258,7 @@ " print(f\" Name of batches in project: {b.name}\")" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Name of project : html-editor\n", - " Name of batches in project: testsss\n" - ] - } - ], + "outputs": [], "execution_count": null } ] diff --git a/examples/basics/ontologies.ipynb b/examples/basics/ontologies.ipynb index a9dc0b4f9..7af496fc6 100644 --- a/examples/basics/ontologies.ipynb +++ b/examples/basics/ontologies.ipynb @@ -1,16 +1,18 @@ { + "nbformat": 4, + "nbformat_minor": 5, + "metadata": {}, "cells": [ { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", " \n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "\n", @@ -22,10 +24,10 @@ "\n", "" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# Ontologies\n", @@ -35,49 +37,48 @@ "* Helpful Links:\n", " * [Ontology documentation](https://docs.labelbox.com/docs/labelbox-ontology)\n", " * [Project Setup Using Ontologies](https://github.com/Labelbox/labelbox-python/blob/master/examples/project_configuration/project_setup.ipynb)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "!pip install labelbox -q" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "import labelbox as lb\n", "import json" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "# API Key and Client\n", "Provide a valid api key below in order to properly connect to the Labelbox Client." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Add your api key\n", "API_KEY = \"\"\n", "client = lb.Client(api_key=API_KEY)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", - "id": "1d40f49d", "metadata": {}, "source": [ "### Create Ontology From Normalized Data\n", @@ -94,13 +95,11 @@ "| Segmentation mask | raster-segmentation |\n", "| Entity | named-entity |\n", "| Relationship | edge |" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# This will automatically create new feature schema\n", "ontology_name = \"sdk-ontology\"\n", @@ -117,22 +116,22 @@ "ontology = client.create_ontology(name=ontology_name,\n", " normalized=ontology_normalized_json)\n", "print(ontology)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Create Ontology From Existing Feature Schemas\n", "* It is often useful to support the same features in multiple ontologies. \n", "* Labelbox supports this workflow by allowing users to create ontologies using existing feature schemas." 
- ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# First create the feature schema\n", "feature_schema_cat = client.create_feature_schema(feature_schema_cat_normalized)\n", @@ -140,23 +139,23 @@ "print(feature_schema_cat.uid)\n", "ontology = client.create_ontology_from_feature_schemas(ontology_name,\n", " [feature_schema_cat.uid])" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Create Ontology From a Mix of New and Existing Feature Schemas\n", "* If we want to create a new ontology that expands upon a previous ontology it is helpful to be able to share a portion of the features.\n", "* To do this we will create the new schema ids that we want. Then we will create an ontology from the new list of ids.\n", "* Note that for additional customization you can also combine the normalized json and use the create_ontology() method (not covered here)." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Create new dog schema id\n", "feature_schema_dog_normalized = {\n", @@ -170,101 +169,103 @@ "# (ie. the cat feature schema will not be re-created)\n", "ontology = client.create_ontology_from_feature_schemas(\n", " ontology_name, [feature_schema_cat.uid, feature_schema_dog.uid])" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Read\n", "* We can directly query by id for ontologies and feature schemas\n", "* We also can search for both by name" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "#### Fetch by ID\n", "feature_schema = client.get_feature_schema(feature_schema_cat.uid)\n", "ontology = client.get_ontology(ontology.uid)\n", "print(feature_schema)\n", "print(ontology)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "#### Search by name\n", "feature_schema = next(client.get_feature_schemas(\"cat\"))\n", "ontology = next(client.get_ontologies(ontology_name))\n", "print(feature_schema)\n", "print(ontology)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Update and Delete" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Check if feature is archived\n", "feature_schema = next(client.get_feature_schemas(\"cat\"))\n", "client.is_feature_schema_archived(ontology_id=ontology.uid, feature_schema_id=feature_schema.uid)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Update a feature's title \n", "client.update_feature_schema_title(feature_schema_id=feature_schema.uid, title=\"cat-2\")\n", "feature = client.get_feature_schema(feature_schema_id=feature_schema.uid)\n", "print(\"Feature: \", feature)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Replace a feature \n", "tool = 
lb.Tool(feature_schema_id=feature_schema.uid, name=\"tool-cat-upserted\", tool=lb.Tool.Type.BBOX, color=\"#FF0000\")\n", "upserted_feature_schema_id = client.upsert_feature_schema(tool.asdict()).uid\n", "feature = client.get_feature_schema(feature_schema_id=upserted_feature_schema_id)\n", "print(\"Updated feature: \", feature)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Insert a new feature \n", "tool = lb.Tool(name=\"tool-cat-2\", tool=lb.Tool.Type.RASTER_SEGMENTATION)\n", "feature_schema_id_new = client.create_feature_schema(tool.asdict()).uid\n", "client.insert_feature_schema_into_ontology(feature_schema_id=feature_schema_id_new, ontology_id=ontology.uid , position=2)\n", "print(\"Updated ontology: \", client.get_ontology(ontology_id=ontology.uid))" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "Delete or Archived a feature:\n", @@ -272,42 +273,40 @@ "If the feature schema is a root level node with associated labels, it will be archived.\n", "If the feature schema is a nested node in the ontology and does not have associated labels, it will be deleted.\n", "If the feature schema is a nested node in the ontology and has associated labels, it will not be deleted." - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "client.delete_feature_schema_from_ontology(ontology_id=ontology.uid, feature_schema_id=feature_schema_id_new)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Only features with annotations will be archived, features without annotations will be deleted. 
\n", "feature_schema_id_with_annotations = \"\" \n", "ontology_id = \"\"\n", "client.unarchive_feature_schema_node(ontology_id=ontology_id, root_feature_schema_id=feature_schema_id_with_annotations)" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "### Ontology Builder\n", "* The ontology builder is a tool for creating and modifying normalized json" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Create normalized json with a bounding box and segmentation tool\n", "ontology_builder = lb.OntologyBuilder(tools=[\n", @@ -318,20 +317,20 @@ "ontology = client.create_ontology(\"ontology-builder-ontology\",\n", " ontology_builder.asdict())\n", "print(json.dumps(ontology.normalized, indent=2))" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "* Alternative syntax for defining the ontology via the OntologyBuilder" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# Create\n", "ontology_builder = lb.OntologyBuilder()\n", @@ -343,20 +342,20 @@ "ontology = client.create_ontology(\"ontology-builder-ontology\",\n", " ontology_builder.asdict())\n", "print(json.dumps(ontology.normalized, indent=2))" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "* Classifications are supported too (Both for top level and as subclassifications)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "ontology_builder = lb.OntologyBuilder(\n", " tools=[\n", @@ -375,20 +374,20 @@ " lb.Option(value=\"blurry\")])\n", " ])\n", "print(json.dumps(ontology_builder.asdict(), indent=2))" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "Example of how to add sub-classfication within an option" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "# We will use add_classification to add this classification to a previously built ontology_builder or you can create new ontology_builder = OntologyBuilder() \n", "radio_classification = lb.Classification(class_type=lb.Classification.Type.RADIO,\n", @@ -404,20 +403,20 @@ "ontology = client.create_ontology(\"example of nested classification\",\n", " ontology_builder.asdict())\n", "print(json.dumps(ontology.normalized, indent=2))" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - "cell_type": "markdown", "metadata": {}, "source": [ "* All Tool objects are constructed the same way:" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "bbox_tool = lb.Tool(tool=lb.Tool.Type.BBOX, name=\"dog_box\")\n", "poly_tool = lb.Tool(tool=lb.Tool.Type.POLYGON, name=\"dog_poly\")\n", @@ -426,21 +425,21 @@ "line_tool = lb.Tool(tool=lb.Tool.Type.LINE, name=\"dog_orientation\")\n", "ner_tool = lb.Tool(tool=lb.Tool.Type.NER, name=\"dog_reference\")\n", "relationship_tool = lb.Tool(tool=lb.Tool.Type.RELATIONSHIP, name=\"relationship\")" - ] + ], + "cell_type": "code", + "outputs": [], + "execution_count": null }, { - 
"cell_type": "markdown", "metadata": {}, "source": [ "* Classifications are all constructed the same way (except text which doesn't require options)\n", "* Classifications can be global or subclasses to a tool (ie dog bounding box, with a breed classification)" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": null, "metadata": {}, - "outputs": [], "source": [ "text_classification = lb.Classification(class_type=lb.Classification.Type.TEXT,\n", " name=\"dog_name\")\n", @@ -451,14 +450,10 @@ " class_type=lb.Classification.Type.CHECKLIST,\n", " name=\"background\",\n", " options=[lb.Option(\"at_park\"), lb.Option(\"has_leash\")])" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" + ], + "cell_type": "code", + "outputs": [], + "execution_count": null } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + ] +} \ No newline at end of file diff --git a/examples/basics/projects.ipynb b/examples/basics/projects.ipynb index 239501de0..e05adb08e 100644 --- a/examples/basics/projects.ipynb +++ b/examples/basics/projects.ipynb @@ -270,7 +270,7 @@ "source": [ "project.move_data_rows_to_task_queue(data_row_ids=lb.GlobalKeys(global_keys), #Provide a list of global keys\n", " task_queue_id=task_queues[2].uid #Passing None moves data rows to \"Done\" task queue\n", - ")" + " )" ], "cell_type": "code", "outputs": [], diff --git a/examples/foundry/object_detection.ipynb b/examples/foundry/object_detection.ipynb index d699bb9d0..8bc442d96 100644 --- a/examples/foundry/object_detection.ipynb +++ b/examples/foundry/object_detection.ipynb @@ -125,16 +125,7 @@ "print(f\"Failed data rows: {task.failed_data_rows}\")" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: None\n", - "Failed data rows: None\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -243,15 +234,7 @@ "MODEL_RUN_ID = task.metadata[\"modelRunId\"]" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: None\n" - ] - } - ], + "outputs": [], "execution_count": null }, { @@ -264,13 +247,13 @@ "#### Workflow\n", "\n", "1. Navigate to your dataset you created for your model run\n", - "2. Select **Select all** in the top right corner\n", - "3. Select **Manage selection** > **Send to Annotate**\n", + "2. Select ***Select all*** in the top right corner\n", + "3. Select ***Manage selection*** > ***Send to Annotate***\n", "4. Specify the project we created from the project dropdown menu\n", "5. Selecting a workflow step is not required since we are not sending annotations from the UI to a project using this notebook \n", - "6. Mark **Include model predictions** then scroll down and select **Map**\n", + "6. Mark ***Include model predictions*** then scroll down and select ***Map***\n", "7. Select the incoming ontology and matching ontology feature for both Car and Person\n", - "8. Once both features are mapped press the **Copy ontology mapping as JSON** in the top right corner\n", + "8. Once both features are mapped press the ***Copy ontology mapping as JSON*** in the top right corner\n", "9. Do not save this configuration, since we are not sending predictions to a project using this UI modal. 
We will be sending predictions in the following steps using the SDK" ], "cell_type": "markdown" @@ -338,15 +321,7 @@ "print(f\"Errors: {task.errors}\")" ], "cell_type": "code", - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: None\n" - ] - } - ], + "outputs": [], "execution_count": null }, { diff --git a/labelbox/__init__.py b/labelbox/__init__.py index c5c0b1a53..8e98f1a8e 100644 --- a/labelbox/__init__.py +++ b/labelbox/__init__.py @@ -1,6 +1,6 @@ name = "labelbox" -__version__ = "3.61.1" +__version__ = "3.61.2" from labelbox.client import Client from labelbox.schema.project import Project diff --git a/labelbox/schema/annotation_import.py b/labelbox/schema/annotation_import.py index 02a4e0f20..1cd4c0660 100644 --- a/labelbox/schema/annotation_import.py +++ b/labelbox/schema/annotation_import.py @@ -51,6 +51,7 @@ def inputs(self) -> List[Dict[str, Any]]: def errors(self) -> List[Dict[str, Any]]: """ Errors for each individual annotation uploaded. This is a subset of statuses + Returns: List of dicts containing error messages. Empty list means there were no errors See `AnnotationImport.statuses` for more details. @@ -63,22 +64,26 @@ def errors(self) -> List[Dict[str, Any]]: def statuses(self) -> List[Dict[str, Any]]: """ Status for each individual annotation uploaded. + Returns: A status for each annotation if the upload is done running. See below table for more details + .. list-table:: - :widths: 15 150 - :header-rows: 1 - * - Field - - Description - * - uuid - - Specifies the annotation for the status row. - * - dataRow - - JSON object containing the Labelbox data row ID for the annotation. - * - status - - Indicates SUCCESS or FAILURE. - * - errors - - An array of error messages included when status is FAILURE. Each error has a name, message and optional (key might not exist) additional_info. + :widths: 15 150 + :header-rows: 1 + + * - Field + - Description + * - uuid + - Specifies the annotation for the status row. + * - dataRow + - JSON object containing the Labelbox data row ID for the annotation. + * - status + - Indicates SUCCESS or FAILURE. + * - errors + - An array of error messages included when status is FAILURE. Each error has a name, message and optional (key might not exist) additional_info. + * This information will expire after 24 hours. """ self.wait_until_done() diff --git a/labelbox/schema/dataset.py b/labelbox/schema/dataset.py index 8ef194c78..9f7cba6ed 100644 --- a/labelbox/schema/dataset.py +++ b/labelbox/schema/dataset.py @@ -68,10 +68,10 @@ def data_rows( """ Custom method to paginate data_rows via cursor. - Params: + Args: from_cursor (str): Cursor (data row id) to start from, if none, will start from the beginning where (dict(str,str)): Filter to apply to data rows. Where value is a data row column name and key is the value to filter on. 
- example: {'external_id': 'my_external_id'} to get a data row with external_id = 'my_external_id' + example: {'external_id': 'my_external_id'} to get a data row with external_id = 'my_external_id' NOTE: diff --git a/labelbox/schema/model_run.py b/labelbox/schema/model_run.py index ba56f2640..b2de493c7 100644 --- a/labelbox/schema/model_run.py +++ b/labelbox/schema/model_run.py @@ -57,7 +57,9 @@ def upsert_labels(self, label_ids: Optional[List[str]] = None, project_id: Optional[str] = None, timeout_seconds=3600): - """ Adds data rows and labels to a Model Run + """ + Adds data rows and labels to a Model Run + Args: label_ids (list): label ids to insert project_id (string): project uuid, all project labels will be uploaded @@ -271,14 +273,16 @@ def add_predictions( name: str, predictions: Union[str, Path, Iterable[Dict], Iterable["Label"]], ) -> 'MEAPredictionImport': # type: ignore - """ Uploads predictions to a new Editor project. + """ + Uploads predictions to a new Editor project. + Args: name (str): name of the AnnotationImport job - predictions (str or Path or Iterable): - url that is publicly accessible by Labelbox containing an + predictions (str or Path or Iterable): url that is publicly accessible by Labelbox containing an ndjson file OR local path to an ndjson file OR iterable of annotation rows + Returns: AnnotationImport """ diff --git a/labelbox/schema/slice.py b/labelbox/schema/slice.py index e7a538da7..505585eb8 100644 --- a/labelbox/schema/slice.py +++ b/labelbox/schema/slice.py @@ -239,9 +239,10 @@ class ModelSlice(Slice): @classmethod def query_str(cls): query_str = """ - query getDataRowIdenfifiersBySavedModelQueryPyApi($id: ID!, $from: DataRowIdentifierCursorInput, $first: Int!) { + query getDataRowIdenfifiersBySavedModelQueryPyApi($id: ID!, $modelRunId: ID, $from: DataRowIdentifierCursorInput, $first: Int!) { getDataRowIdentifiersBySavedModelQuery(input: { savedQueryId: $id, + modelRunId: $modelRunId, after: $from first: $first }) { @@ -263,17 +264,23 @@ def query_str(cls): """ return query_str - def get_data_row_ids(self) -> PaginatedCollection: + def get_data_row_ids(self, model_run_id: str) -> PaginatedCollection: """ Fetches all data row ids that match this Slice + Params + model_run_id: str, required, uid or cuid of model run + Returns: A PaginatedCollection of data row ids """ return PaginatedCollection( client=self.client, query=ModelSlice.query_str(), - params={'id': str(self.uid)}, + params={ + 'id': str(self.uid), + 'modelRunId': model_run_id + }, dereferencing=['getDataRowIdentifiersBySavedModelQuery', 'nodes'], obj_class=lambda _, data_row_id_and_gk: data_row_id_and_gk.get('id' ), @@ -282,17 +289,24 @@ def get_data_row_ids(self) -> PaginatedCollection: 'endCursor' ]) - def get_data_row_identifiers(self) -> PaginatedCollection: + def get_data_row_identifiers(self, + model_run_id: str) -> PaginatedCollection: """ Fetches all data row ids and global keys (where defined) that match this Slice + Params: + model_run_id : str, required, uid or cuid of model run + Returns: A PaginatedCollection of Slice.DataRowIdAndGlobalKey """ return PaginatedCollection( client=self.client, query=ModelSlice.query_str(), - params={'id': str(self.uid)}, + params={ + 'id': str(self.uid), + 'modelRunId': model_run_id + }, dereferencing=['getDataRowIdentifiersBySavedModelQuery', 'nodes'], obj_class=lambda _, data_row_id_and_gk: Slice.DataRowIdAndGlobalKey( data_row_id_and_gk.get('id'),
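
Usage sketch for the headline change in this patch: ModelSlice.get_data_row_ids and
ModelSlice.get_data_row_identifiers now take a required model_run_id, so results are
scoped to a single model run (the 3.61.2 fix noted in the CHANGELOG hunk above). This
is a hedged example, not part of the diff: the placeholder API key and ids must be
filled in with real values, client.get_model_slice is taken from the existing SDK
surface rather than introduced by this patch, and the .id / .global_key attributes on
Slice.DataRowIdAndGlobalKey are assumed from the constructor call visible in the
slice.py hunk above.

    import labelbox as lb

    # Placeholders -- fill in real values before running (assumptions, not from this patch).
    API_KEY = ""
    MODEL_SLICE_ID = ""
    MODEL_RUN_ID = ""

    client = lb.Client(api_key=API_KEY)

    # get_model_slice is existing SDK surface, not added by this patch.
    model_slice = client.get_model_slice(MODEL_SLICE_ID)

    # Both methods are now scoped by model run: pass the run's uid/cuid.
    # Each returns a PaginatedCollection, so iteration fetches pages lazily.
    for data_row_id in model_slice.get_data_row_ids(model_run_id=MODEL_RUN_ID):
        print(data_row_id)

    # get_data_row_identifiers yields Slice.DataRowIdAndGlobalKey objects; the
    # .id / .global_key attribute names are assumed from the constructor shown
    # in the hunk above (global_key may be None where no global key is defined).
    for row in model_slice.get_data_row_identifiers(model_run_id=MODEL_RUN_ID):
        print(row.id, row.global_key)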