diff --git a/examples/prediction_upload/image_predictions.ipynb b/examples/prediction_upload/image_predictions.ipynb index d6daa8ce7..25c1beba9 100644 --- a/examples/prediction_upload/image_predictions.ipynb +++ b/examples/prediction_upload/image_predictions.ipynb @@ -1,1135 +1,1239 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, "cells": [ { "cell_type": "markdown", - "id": "a6a048e8-b5fe-418b-aec4-829b5b6802e5", "metadata": { "id": "a6a048e8-b5fe-418b-aec4-829b5b6802e5" }, "source": [ "\n", - " \n", + " \n", "" ] }, { "cell_type": "markdown", - "id": "51cf1362-1cde-4749-aac7-5fb94473baa7", "metadata": { "id": "51cf1362-1cde-4749-aac7-5fb94473baa7" }, "source": [ + "\n", "\n", - "\n", "\n", "\n", "\n", - "\n", "" ] }, { "cell_type": "markdown", - "id": "339795d3-e36c-4470-8605-62bfdd5eea29", - "metadata": { - "id": "339795d3-e36c-4470-8605-62bfdd5eea29" - }, "source": [ "# Image Prediction Import\n", + "\n", "* This notebook walks you through the process of uploading model predictions to a Model Run. This notebook provides an example for each supported prediction type for image assets. \n", "\n", - "A Model Run is a container for the predictions, annotations and metrics of a specific experiment in your ML model development cycle." 
- ] - }, - { - "cell_type": "markdown", - "id": "e76f007b-9465-4acd-9008-20e25e4a4b98", + "A Model Run is a container for the predictions, annotations and metrics of a specific experiment in your ML model development cycle.\n", + "\n", + "**Supported annotations that can be uploaded through the SDK**\n", + "\n", + "- Bounding box \n", + "- Polygon\n", + "- Point\n", + "- Polyline \n", + "- Classification free-text\n", + "- Classification - radio\n", + "- Classification - checklist\n", + "\n", + "\n" + ], "metadata": { - "id": "e76f007b-9465-4acd-9008-20e25e4a4b98" - }, - "source": [ - "* For information on what types of predictions are supported per data type, refer to this documentation:\n", - " * https://docs.labelbox.com/docs/upload-model-predictions#step-6-create-the-predictions-payload" - ] + "id": "9znxMjDYGi0Y" + } }, { "cell_type": "markdown", - "id": "f53d50fc-8d3c-452b-9aaf-f6170aaa5576", - "metadata": { - "id": "f53d50fc-8d3c-452b-9aaf-f6170aaa5576" - }, "source": [ "* Notes:\n", " * If you are importing more than 1,000 mask predictions at a time, consider submitting separate jobs, as they can take longer than other prediction types to import.\n", " * After the execution of this notebook a complete Model Run with predictions will be created in your organization. 
" - ] + ], + "metadata": { + "id": "8uOiTLI413Kj" + } }, { "cell_type": "markdown", - "id": "5effdaa3-e701-4804-aa33-bbbaed99eb92", - "metadata": { - "id": "5effdaa3-e701-4804-aa33-bbbaed99eb92" - }, "source": [ - "# Installs" - ] + "## Setup" + ], + "metadata": { + "id": "UtJHIuE8HDRI" + } }, { "cell_type": "code", - "execution_count": 1, - "id": "4d63074b-2379-48af-b9d6-2a66190f03c4", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "4d63074b-2379-48af-b9d6-2a66190f03c4", - "outputId": "5945adac-cd52-4d1c-9a6d-7429150e2d50" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[K |████████████████████████████████| 184 kB 5.2 MB/s \n", - "\u001b[K |████████████████████████████████| 7.8 MB 58.9 MB/s \n", - "\u001b[?25h Building wheel for pygeotile (setup.py) ... \u001b[?25l\u001b[?25hdone\n" - ] - } - ], "source": [ "!pip install -q 'labelbox[data]'" - ] - }, - { - "cell_type": "markdown", - "id": "6f3cdca1-524f-4247-a63b-2d4371b0257d", + ], "metadata": { - "id": "6f3cdca1-524f-4247-a63b-2d4371b0257d" + "id": "cm8xMaLbGb7v" }, - "source": [ - "# Imports" - ] + "execution_count": 36, + "outputs": [] }, { "cell_type": "code", - "execution_count": 2, - "id": "01fca8c9-0680-4a9c-a11e-1b49f31e9121", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "01fca8c9-0680-4a9c-a11e-1b49f31e9121", - "outputId": "b6197a3c-c819-48ea-ef11-52a4973b1f32" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3.33.1\n" - ] - } - ], "source": [ - "import labelbox\n", "from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option\n", - "from labelbox.schema.queue_mode import QueueMode\n", - "from labelbox import Client, LabelingFrontend, LabelImport, MediaType\n", + "from labelbox import Client, MALPredictionImport, LabelImport\n", + "from labelbox.data.serialization import NDJsonConverter\n", + "from labelbox.schema.media_type import 
MediaType\n", "from labelbox.data.annotation_types import (\n", " Label, ImageData, ObjectAnnotation, MaskData,\n", " Rectangle, Point, Line, Mask, Polygon,\n", " Radio, Checklist, Text,\n", " ClassificationAnnotation, ClassificationAnswer\n", ")\n", - "from labelbox.data.serialization import NDJsonConverter\n", - "import json\n", "import uuid\n", - "import copy\n", "import numpy as np\n", - "print(labelbox.__version__)" - ] + "from labelbox.schema.queue_mode import QueueMode" + ], + "metadata": { + "id": "NIq-6M9kHKSs" + }, + "execution_count": 37, + "outputs": [] }, { "cell_type": "markdown", - "id": "a72d96e8-33ce-434d-b330-393e1c31702a", - "metadata": { - "id": "a72d96e8-33ce-434d-b330-393e1c31702a" - }, "source": [ - "# API Key and Client\n", - "Provide a valid api key below in order to properly connect to the Labelbox Client." - ] + "## Replace with your API Key \n", + "Guides on [Create an API key](https://docs.labelbox.com/docs/create-an-api-key)" + ], + "metadata": { + "id": "pZ2rBqY8HQoe" + } }, { "cell_type": "code", - "execution_count": 3, - "id": "86003724-4807-4281-95c1-5284a6f9609f", + "source": [ + "API_KEY = None\n", + "client = Client(API_KEY)" + ], "metadata": { - "id": "86003724-4807-4281-95c1-5284a6f9609f" + "id": "z7ZLKLYLHP__" }, - "outputs": [], - "source": [ - "# Add your api key as a string\n", - "API_KEY = \"\"\n", - "client = Client(api_key=API_KEY)" - ] + "execution_count": 38, + "outputs": [] }, { "cell_type": "markdown", - "id": "960998ac-bde4-4184-8b7d-26d8e019cc7f", - "metadata": { - "id": "960998ac-bde4-4184-8b7d-26d8e019cc7f" - }, "source": [ - "---- \n", - "### Steps\n", - "1. Make sure project is setup\n", - "2. Collect annotations\n", - "3. 
Upload" - ] + "## Supported Predictions" + ], + "metadata": { + "id": "OePiibbed0nG" + } }, { - "cell_type": "markdown", - "id": "82a7381b-2409-4ed3-9d25-881a1e1d8ca6", + "cell_type": "code", + "source": [ + "########### Radio Classification ###########\n", + "\n", + "# Python annotation\n", + "radio_prediction = ClassificationAnnotation(\n", + " name=\"radio_question\", \n", + " value=Radio(answer = ClassificationAnswer(name = \"second_radio_answer\", confidence=0.5))\n", + ")\n", + "\n", + "# NDJSON\n", + "radio_prediction_ndjson = {\n", + " 'name': 'radio_question',\n", + " 'answer': {'name': 'second_radio_answer', 'confidence': 0.5}\n", + "} " + ], "metadata": { - "id": "82a7381b-2409-4ed3-9d25-881a1e1d8ca6" + "id": "v5wL6oojz9Ge" }, - "source": [ - "### Create a Model Run (for predictions) and a Project (for annotations)" - ] + "execution_count": 39, + "outputs": [] }, { - "cell_type": "markdown", - "id": "d51fd2e1-bf5f-4c61-bca1-929f43b076ed", + "cell_type": "code", + "source": [ + "########## Nested Classifications are only supported with NDJSON tools ##########\n", + "\n", + "nested_radio_prediction_ndjson = {\n", + " \"name\": \"nested_radio_question\",\n", + " \"confidence\": 0.5 ,\n", + " \"answer\": { \"name\": \"first_radio_answer\", \"confidence\": 0.5 },\n", + " \"classifications\" : [\n", + " {\n", + " \"name\": \"sub_radio_question\", \n", + " \"answer\": {\"name\": \"first_sub_radio_answer\", \"confidence\": 0.5 }\n", + " }\n", + " ]\n", + "}\n", + "\n", + "\n", + "nested_checklist_prediction_ndjson = {\n", + " \"name\": \"nested_checklist_question\",\n", + " \"confidence\": 0.5 ,\n", + " \"answer\": [{\n", + " \"name\": \"first_checklist_answer\", \n", + " \"confidence\": 0.5,\n", + " \"classifications\" : [\n", + " {\n", + " \"name\": \"sub_checklist_question\", \n", + " \"answer\": {\"name\": \"first_sub_checklist_answer\", \"confidence\": 0.5 }\n", + " } \n", + " ] \n", + " }]\n", + "}\n", + "\n" + ], "metadata": { - "id": 
"d51fd2e1-bf5f-4c61-bca1-929f43b076ed" + "id": "I75K-wx7_sDs" }, - "source": [ - "We will be creating \n", - "- a Model and a Model Run to contain model predictions\n", - "- a project to contain annotations" - ] + "execution_count": 40, + "outputs": [] }, { - "cell_type": "markdown", - "id": "49i_juOUr6av", - "metadata": { - "id": "49i_juOUr6av" - }, + "cell_type": "code", "source": [ - "First, we create an ontology with all the possible tools and classifications supported for images. The official list of supported predictions and annotations that can be uploaded can be found:\n", - "- [predictions that can be uploaded to a Model Run](https://docs.labelbox.com/docs/upload-model-predictions#step-6-create-the-predictions-payload)\n", - "- [annotations that can be imported in a project as ground-truths](https://docs.labelbox.com/docs/import-ground-truth)\n", + "############ Checklist ############\n", "\n", - "Note: the ontology of the Model Run does not need to match the ontology of the project. However, only the features present in the Model Run ontology can be uploaded as predictions and annotations to the Model Run." 
- ] + "# Python Annotations\n", + "checklist_prediction = ClassificationAnnotation(\n", + " name=\"checklist_question\", # must match your ontology feature's name\n", + " value=Checklist(\n", + " answer = [\n", + " ClassificationAnswer(\n", + " name = \"first_checklist_answer\", \n", + " confidence=0.5\n", + " ), \n", + " ClassificationAnswer(\n", + " name = \"second_checklist_answer\", \n", + " confidence=0.5\n", + " )\n", + " ]\n", + " )\n", + " )\n", + "\n", + "# NDJSON\n", + "checklist_prediction_ndjson = {\n", + " 'name': 'checklist_question',\n", + " 'answer': [\n", + " {'name': 'first_checklist_answer' , 'confidence': 0.5},\n", + " {'name': 'second_checklist_answer', 'confidence': 0.5}\n", + " ]\n", + "}" + ], + "metadata": { + "id": "b2UjSoYez9I1" + }, + "execution_count": 41, + "outputs": [] }, { "cell_type": "code", - "execution_count": 6, - "id": "f9f9287c-aad7-4914-bc87-1453fb8bce81", + "source": [ + "####### Bounding box #######\n", + "\n", + "\n", + "# Python Annotation \n", + "bbox_prediction = ObjectAnnotation(\n", + " name = \"bounding_box\", # must match your ontology feature's name\n", + " confidence=0.5, \n", + " value=Rectangle(\n", + " start=Point(x=977, y=1690), # Top left\n", + " end=Point(x=330, y=225), # Bottom right\n", + " ),\n", + " \n", + ")\n", + "\n", + "#NDJSON \n", + "bbox_prediction_ndjson = {\n", + " 'name': 'bounding_box', \n", + " 'confidence': 0.5,\n", + " 'bbox': {\n", + " \"top\": 977,\n", + " \"left\": 1690,\n", + " \"height\": 330,\n", + " \"width\": 225\n", + " }\n", + "}\n" + ], "metadata": { - "id": "f9f9287c-aad7-4914-bc87-1453fb8bce81" + "id": "xCU4JRP0z9Nh" }, - "outputs": [], + "execution_count": 42, + "outputs": [] + }, + { + "cell_type": "code", "source": [ - "ontology_builder = OntologyBuilder(\n", - " tools=[ # List of Tool objects\n", - " Tool( # Bounding Box tool given the name \"box\"\n", - " tool=Tool.Type.BBOX, \n", - " name=\"box\"), \n", - " Tool( # Polyline tool given the name \"line\"\n", - " 
tool=Tool.Type.LINE, \n", - " name=\"line\"), \n", - " Tool( # Point tool given the name \"point\"\n", - " tool=Tool.Type.POINT, \n", - " name=\"point\"), \n", - " Tool( # Polygon tool given the name \"polygon\"\n", - " tool=Tool.Type.POLYGON, \n", - " name=\"polygon\"), \n", - " Tool( # Segmentation mask tool given the name \"mask\"\n", - " tool=Tool.Type.SEGMENTATION, \n", - " name=\"mask\")], \n", - " classifications=[ # List of Classification objects\n", - " Classification( # Text classification given the name \"text\"\n", - " class_type=Classification.Type.TEXT,\n", - " instructions=\"text\"), \n", - " Classification( # Checklist classification given the name \"text\" with two options: \"first_checklist_answer\" and \"second_checklist_answer\"\n", - " class_type=Classification.Type.CHECKLIST, \n", - " instructions=\"checklist\", \n", - " options=[\n", - " Option(value=\"first_checklist_answer\"),\n", - " Option(value=\"second_checklist_answer\") \n", - " ]\n", - " ), \n", - " Classification( # Radio classification given the name \"text\" with two options: \"first_radio_answer\" and \"second_radio_answer\"\n", - " class_type=Classification.Type.RADIO, \n", - " instructions=\"radio\", \n", - " options=[\n", - " Option(value=\"first_radio_answer\"),\n", - " Option(value=\"second_radio_answer\")\n", - " ]\n", + "####### Bounding box with nested classification #######\n", + "bbox_with_radio_subclass_prediction = ObjectAnnotation(\n", + " name=\"bbox_with_radio_subclass\",\n", + " confidence=0.5, # must match your ontology feature's name\n", + " value=Rectangle(\n", + " start=Point(x=933, y=541), # Top left\n", + " end=Point(x=191, y=330), # Bottom right\n", + " ),\n", + " classifications=[\n", + " \tClassificationAnnotation(\n", + " \tname=\"sub_radio_question\",\n", + " \t\tvalue=Radio(answer=ClassificationAnswer(name=\"first_sub_radio_answer\", confidence=0.5))\n", " )\n", " ]\n", ")\n", "\n", - "ontology = client.create_ontology(\"Ontology Image\", 
ontology_builder.asdict())" - ] + "\n", + "## NDJSON\n", + "bbox_with_radio_subclass_prediction_ndjson = {\n", + "  \"name\": \"bbox_with_radio_subclass\", \n", + "  \"confidence\": 0.5,\n", + "  \"classifications\": [{\n", + "    \"name\": \"sub_radio_question\",\n", + "    \"confidence\": 0.5,\n", + "    \"answer\": \n", + "        { \"name\":\"first_sub_radio_answer\", \"confidence\": 0.5}\n", + "        \n", + "  }],\n", + "  \"bbox\": {\n", + "    \"top\": 933,\n", + "    \"left\": 541,\n", + "    \"height\": 191,\n", + "    \"width\": 330\n", + "  }\n", + "}" + ], + "metadata": { + "id": "gAIzsxEjLmhv" + }, + "execution_count": 43, + "outputs": [] }, { - "cell_type": "markdown", - "id": "1GdimALBuzRU", + "cell_type": "code", + "source": [ + "########## Polygon ##########\n", + "# Python Annotation \n", + "polygon_prediction = ObjectAnnotation(\n", + "  name = \"polygon\", # must match your ontology feature's name \n", + "  confidence = 0.5, \n", + "  value=Polygon( # Coordinates for the vertices of your polygon\n", + "    points=[Point(x=1489.581,y=183.934),Point(x=2278.306,y=256.885),Point(x=2428.197,y=200.437),Point(x=2560.0,y=335.419),\n", + "           Point(x=2557.386,y=503.165),Point(x=2320.596,y=503.103),Point(x=2156.083, y=628.943),Point(x=2161.111,y=785.519),\n", + "           Point(x=2002.115, y=894.647),Point(x=1838.456,y=877.874),Point(x=1436.53,y=874.636),Point(x=1411.403,y=758.579),\n", + "           Point(x=1353.853,y=751.74),Point(x=1345.264, y=453.461),Point(x=1426.011,y=421.129)]\n", + "  ),\n", + ")\n", + "\n", + "\n", + "# NDJSON\n", + "\n", + "polygon_prediction_ndjson = {\n", + "  'name': 'polygon',\n", + "  'confidence': 0.5,\n", + "  'polygon': [\n", + "    {'x': 1489.581, 'y': 183.934},\n", + "    {'x': 2278.306, 'y': 256.885},\n", + "    {'x': 2428.197, 'y': 200.437},\n", + "    {'x': 2560.0, 'y': 335.419},\n", + "    {'x': 2557.386, 'y': 503.165},\n", + "    {'x': 2320.596, 'y': 503.103},\n", + "    {'x': 2156.083, 'y': 628.943},\n", + "    {'x': 2161.111, 'y': 785.519},\n", + "    {'x': 2002.115, 'y': 894.647},\n", + "    {'x': 1838.456, 
'y': 877.874},\n", + " {'x': 1436.53, 'y': 874.636},\n", + " {'x': 1411.403, 'y': 758.579},\n", + " {'x': 1353.853, 'y': 751.74},\n", + " {'x': 1345.264, 'y': 453.461},\n", + " {'x': 1426.011, 'y': 421.129},\n", + " {'x': 1489.581, 'y': 183.934}\n", + " ]\n", + "}" + ], "metadata": { - "id": "1GdimALBuzRU" + "id": "jRwfE4MFz9Ph" }, - "source": [ - "We create a Model and a Model Run, to contain the predictions. " - ] + "execution_count": 44, + "outputs": [] }, { "cell_type": "code", - "execution_count": 7, - "id": "hANaXddn5Krs", + "source": [ + "####### Free text #######\n", + "# Confidence is not supported for text prediction\n", + "# Python annotation\n", + "text_annotation = ClassificationAnnotation(\n", + " name=\"free_text\", # must match your ontology feature's name\n", + " value=Text(answer=\"sample text\")\n", + ")\n", + "\n", + "# NDJSON\n", + "text_annotation_ndjson = {\n", + " 'name': 'free_text',\n", + " 'answer': 'sample text',\n", + "}" + ], "metadata": { - "id": "hANaXddn5Krs" + "id": "PBB37YpWTiVR" }, - "outputs": [], - "source": [ - "# create Model\n", - "model = client.create_model(name=\"image_model_run\", \n", - " ontology_id=ontology.uid)\n", - "# create Model Run\n", - "model_run = model.create_model_run(\"iteration 1\")" - ] + "execution_count": 45, + "outputs": [] }, { - "cell_type": "markdown", - "id": "EC_D3TFX5LBo", + "cell_type": "code", + "source": [ + "######### Segmentation mask #########\n", + "\n", + "# Python \n", + "# Identifying what values in the numpy array correspond to the mask annotation\n", + "color = (0, 0, 0)\n", + "\n", + "# convert a polygon to mask\n", + "im_height, im_width = 100,100 #need to provide the height and width of image.\n", + "mask_data = MaskData(arr=\n", + " polygon_prediction.value.draw(height=im_height,width=im_width,color=color))\n", + "\n", + "# convert a 2D array to 3D array\n", + "arr_2d = np.zeros((100,100), dtype='uint8')\n", + "mask_data = MaskData.from_2D_arr(arr_2d)\n", + "\n", + "# a 3D array 
where 3rd axis is RGB values.\n", + "mask_data = MaskData(arr= np.zeros([400,450,3],dtype='uint8'))\n", + "\n", + "mask_prediction = ObjectAnnotation(\n", + " name = \"mask\", # must match your ontology feature's name\n", + " confidence=0.5,\n", + " value=Mask(mask=mask_data, color=color),\n", + ")\n", + "\n", + "\n", + "# NDJSON\n", + "mask_prediction_ndjson = {\n", + " 'name': 'mask',\n", + " 'confidence': 0.5,\n", + " 'classifications': [],\n", + " 'mask': {'instanceURI': 'https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA',\n", + " 'colorRGB': (0, 0, 0)}\n", + "}\n", + "\n" + ], "metadata": { - "id": "EC_D3TFX5LBo" + "id": "39vz-tYsz9Ry" }, - "source": [ - "We create a project, to contain the annotations." - ] + "execution_count": 46, + "outputs": [] }, { "cell_type": "code", - "execution_count": 8, - "id": "044e9194-d21d-403e-b64c-047c1063b0fe", + "source": [ + "######## Point ########\n", + "\n", + "# Python Annotation\n", + "point_prediction = ObjectAnnotation(\n", + " name = \"point\", # must match your ontology feature's name\n", + " confidence=0.5,\n", + " value = Point(x=1166.606, y=1441.768),\n", + ")\n", + "\n", + "\n", + "# NDJSON\n", + "point_prediction_ndjson = {\n", + " 'name': 'point',\n", + " 'confidence': 0.5,\n", + " 'classifications': [],\n", + " 'point': {'x': 1166.606, 'y': 1441.768}\n", + "}" + ], "metadata": { - "id": "044e9194-d21d-403e-b64c-047c1063b0fe" + "id": "UelSiWN2z9Tg" }, - "outputs": [], - "source": [ - "# Create a Labelbox project\n", - "project = client.create_project(name=\"image_project\", \n", - " queue_mode=QueueMode.Batch,\n", - " # Quality Settings setup \n", - " auto_audit_percentage=1,\n", - " auto_audit_number_of_labels=1,\n", - " media_type=MediaType.Image)\n", - "project.setup_editor(ontology)" - ] + "execution_count": 47, + "outputs": [] }, { - "cell_type": "markdown", - "id": 
"o9JbDSBH5fsF", + "cell_type": "code", + "source": [ + "###### Polyline ######\n", + "\n", + "\n", + "# Python Annotation \n", + "\n", + "polyline_prediction = ObjectAnnotation(\n", + " name = \"polyline\", # must match your ontology feature's name\n", + " confidence=0.5, ## Not supported for python annotation tools\n", + " value=Line( # Coordinates for the keypoints in your polyline\n", + " points=[Point(x=2534.353, y=249.471),Point(x=2429.492, y=182.092),Point(x=2294.322, y=221.962),Point(x=2224.491, y=180.463),Point(x=2136.123, y=204.716),\n", + " Point(x=1712.247, y=173.949),Point(x=1703.838, y=84.438),Point(x=1579.772, y=82.61),Point(x=1583.442, y=167.552),\n", + " Point(x=1478.869, y=164.903),Point(x=1418.941, y=318.149),Point(x=1243.128, y=400.815),Point(x=1022.067, y=319.007),\n", + " Point(x=892.367, y=379.216),Point(x=670.273, y=364.408),Point(x=613.114, y=288.16),Point(x=377.559, y=238.251),\n", + " Point(x=368.087, y=185.064),Point(x=246.557, y=167.286),Point(x=236.648, y=285.61),Point(x=90.929, y=326.412)]\n", + " ),\n", + ")\n", + "\n", + "# NDJSON\n", + "polyline_prediction_ndjson = {\n", + " 'name': 'polyline',\n", + " 'confidence':0.5,\n", + " 'classifications': [],\n", + " 'line': [\n", + " {'x': 2534.353, 'y': 249.471},\n", + " {'x': 2429.492, 'y': 182.092},\n", + " {'x': 2294.322, 'y': 221.962},\n", + " {'x': 2224.491, 'y': 180.463},\n", + " {'x': 2136.123, 'y': 204.716},\n", + " {'x': 1712.247, 'y': 173.949},\n", + " {'x': 1703.838, 'y': 84.438},\n", + " {'x': 1579.772, 'y': 82.61},\n", + " {'x': 1583.442, 'y': 167.552},\n", + " {'x': 1478.869, 'y': 164.903},\n", + " {'x': 1418.941, 'y': 318.149},\n", + " {'x': 1243.128, 'y': 400.815},\n", + " {'x': 1022.067, 'y': 319.007},\n", + " {'x': 892.367, 'y': 379.216},\n", + " {'x': 670.273, 'y': 364.408},\n", + " {'x': 613.114, 'y': 288.16},\n", + " {'x': 377.559, 'y': 238.251},\n", + " {'x': 368.087, 'y': 185.064},\n", + " {'x': 246.557, 'y': 167.286},\n", + " {'x': 236.648, 'y': 285.61},\n", + " 
{'x': 90.929, 'y': 326.412}\n", + " ]\n", + "}\n" + ], "metadata": { - "id": "o9JbDSBH5fsF" + "id": "mrjb8qY3z9VY" }, + "execution_count": 48, + "outputs": [] + }, + { + "cell_type": "markdown", "source": [ - "### Create a dataset with a data row\n", - "We will upload predictions and annotations on this data row. " - ] + "## Step 1: Import data rows into Catalog" + ], + "metadata": { + "id": "U-o15yu9IPDo" + } }, { "cell_type": "code", - "execution_count": 9, - "id": "WCFSlblL5gDc", + "source": [ + "# send a sample image as batch to the project\n", + "test_img_url = {\n", + " \"row_data\": \"https://storage.googleapis.com/labelbox-datasets/image_sample_data/2560px-Kitano_Street_Kobe01s5s4110.jpeg\",\n", + " \"global_key\": str(uuid.uuid4())\n", + "}\n", + "dataset = client.create_dataset(name=\"image_prediction_demo\")\n", + "data_row = dataset.create_data_row(test_img_url)\n", + "print(data_row)" + ], "metadata": { + "id": "HjH9gTV8IBG9", "colab": { "base_uri": "https://localhost:8080/" }, - "id": "WCFSlblL5gDc", - "outputId": "571c1b3f-9d20-4b6c-a30f-87c20dd1dba8" + "outputId": "800b120e-c41e-4f6f-a509-8d089f3e20bc" }, + "execution_count": 49, "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\n" ] } - ], - "source": [ - "# # Create one Labelbox dataset\n", - "dataset = client.create_dataset(name=\"image_prediction_import_demo_dataset\")\n", - "# Grab an example image and create a Labelbox data row in the dataset\n", - "uploads = {\n", - " \"row_data\": \"https://raw.githubusercontent.com/Labelbox/labelbox-python/develop/examples/assets/2560px-Kitano_Street_Kobe01s5s4110.jpg\",\n", - " # To learn more about Global Keys : https://docs.labelbox.com/docs/global-keys\n", - " \"global_key\": \"TEST-ID-%id\" % uuid.uuid1()\n", - " }\n", - "data_row = dataset.create_data_row(uploads)\n", - "print(data_row)" ] }, { "cell_type": "markdown", - "id": "8eRGvN8ynJD6", - "metadata": { - "id": "8eRGvN8ynJD6" - }, "source": [ - "### Send 
the data row to the Model Run and to the project" - ] - }, - { - "cell_type": "markdown", - "id": "U-yBDwZuBn_M", + "## Step 2: Create/select an Ontology for your model predictions\n", + "Your project should have the correct ontology setup with all the tools and classifications supported for your annotations, and the tool names and classification instructions should match the name/instructions fields in your annotations to ensure the correct feature schemas are matched.\n" + ], "metadata": { - "id": "U-yBDwZuBn_M" - }, - "source": [ - "Get the data row IDs that we just uploaded" - ] + "id": "oy0umzuNIceP" + } }, { "cell_type": "code", - "execution_count": 10, - "id": "nphpP2OmBnGQ", + "source": [ + "ontology_builder = OntologyBuilder(\n", + " classifications=[ # List of Classification objects\n", + " Classification( # Radio classification given the name \"text\" with two options: \"first_radio_answer\" and \"second_radio_answer\"\n", + " class_type=Classification.Type.RADIO, \n", + " instructions=\"radio_question\", \n", + " options=[\n", + " Option(value=\"first_radio_answer\"),\n", + " Option(value=\"second_radio_answer\")\n", + " ]\n", + " ),\n", + " Classification( # Checklist classification given the name \"text\" with two options: \"first_checklist_answer\" and \"second_checklist_answer\"\n", + " class_type=Classification.Type.CHECKLIST, \n", + " instructions=\"checklist_question\", \n", + " options=[\n", + " Option(value=\"first_checklist_answer\"),\n", + " Option(value=\"second_checklist_answer\") \n", + " ]\n", + " ), \n", + " Classification( # Text classification given the name \"text\"\n", + " class_type=Classification.Type.TEXT,\n", + " instructions=\"free_text\"\n", + " ),\n", + " Classification(\n", + " class_type=Classification.Type.RADIO, \n", + " instructions=\"nested_radio_question\",\n", + " options=[\n", + " Option(\"first_radio_answer\",\n", + " options=[\n", + " Classification(\n", + " class_type=Classification.Type.RADIO,\n", + " 
instructions=\"sub_radio_question\",\n", + " options=[Option(\"first_sub_radio_answer\")]\n", + " )\n", + " ]\n", + " )\n", + " ] \n", + " ),\n", + " Classification(\n", + " class_type=Classification.Type.CHECKLIST, \n", + " instructions=\"nested_checklist_question\",\n", + " options=[\n", + " Option(\"first_checklist_answer\",\n", + " options=[\n", + " Classification(\n", + " class_type=Classification.Type.CHECKLIST, \n", + " instructions=\"sub_checklist_question\", \n", + " options=[Option(\"first_sub_checklist_answer\")]\n", + " )\n", + " ]\n", + " )\n", + " ]\n", + " ), \n", + " ],\n", + " tools=[ # List of Tool objects\n", + " Tool( # Bounding Box tool given the name \"box\"\n", + " tool=Tool.Type.BBOX, \n", + " name=\"bounding_box\"), \n", + " Tool( # Bounding Box tool given the name \"box\"\n", + " tool=Tool.Type.BBOX, \n", + " name=\"bbox_with_radio_subclass\",\n", + " classifications=[\n", + " Classification(\n", + " class_type=Classification.Type.RADIO,\n", + " instructions=\"sub_radio_question\",\n", + " options=[\n", + " Option(value=\"first_sub_radio_answer\")\n", + " ]\n", + " ),\n", + " ]\n", + " ), \n", + " Tool( # Polygon tool given the name \"polygon\"\n", + " tool=Tool.Type.POLYGON, \n", + " name=\"polygon\"),\n", + " Tool( # Segmentation mask tool given the name \"mask\"\n", + " tool=Tool.Type.SEGMENTATION, \n", + " name=\"mask\"),\n", + " \t Tool( # Point tool given the name \"point\"\n", + " tool=Tool.Type.POINT, \n", + " name=\"point\"), \n", + " Tool( # Polyline tool given the name \"line\"\n", + " tool=Tool.Type.LINE, \n", + " name=\"polyline\")]\n", + ")\n", + "\n", + "ontology = client.create_ontology(\"Image Prediction Import Demo\", ontology_builder.asdict(), media_type=MediaType.Image)" + ], "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "nphpP2OmBnGQ", - "outputId": "9bb863b3-c02e-4d9a-f184-633ec60cd523" + "id": "Kt4XWWqgIiWk" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - 
"text": [ - "datarow_ids: ['clbycrqin0kfb08wea4mp0jkb']\n" - ] - } - ], - "source": [ - "# Data row ID(s) to send to the Model Run and to the project.\n", - "datarow_ids = [dr.uid for dr in list(dataset.export_data_rows())]\n", - "print(\"datarow_ids: \",datarow_ids)" - ] + "execution_count": 50, + "outputs": [] }, { "cell_type": "markdown", - "id": "38FLeQKMBF9z", - "metadata": { - "id": "38FLeQKMBF9z" - }, "source": [ - "Send the data row to the Model Run" - ] + "## Step 3: Create a Model and Model Run" + ], + "metadata": { + "id": "ZjN8jxHvIvHP" + } }, { "cell_type": "code", - "execution_count": 11, - "id": "T1vk_EvzBI3u", + "source": [ + "# create Model\n", + "model = client.create_model(name=\"image_model_run_\" + str(uuid.uuid4()),\n", + " ontology_id=ontology.uid)\n", + "# create Model Run\n", + "model_run = model.create_model_run(\"iteration 1\")" + ], "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "T1vk_EvzBI3u", - "outputId": "e866fab9-2d89-4ea1-9b7f-ec1436834643" + "id": "8n-AvzdiOR6d" }, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "model_run.upsert_data_rows(datarow_ids)" - ] + "execution_count": 51, + "outputs": [] }, { "cell_type": "markdown", - "id": "5D7wBx41BJa9", - "metadata": { - "id": "5D7wBx41BJa9" - }, "source": [ - "Send the data row to the project" - ] + "## Step 4: Send data rows to the Model Run" + ], + "metadata": { + "id": "NX6L0axRJN5J" + } }, { "cell_type": "code", - "execution_count": 12, - "id": "yfNPsINLnPcO", + "source": [ + "model_run.upsert_data_rows([data_row.uid])" + ], "metadata": { + "id": "6sngCgIwJSae", "colab": { "base_uri": "https://localhost:8080/" }, - "id": "yfNPsINLnPcO", - "outputId": "1ab9fbde-126c-4a21-da18-ea70c52c4d75" + "outputId": "4eb2e619-1139-49c5-a9e7-d29838336c05" }, + "execution_count": 52, "outputs": [ { + "output_type": "execute_result", "data": { 
"text/plain": [ - "" + "True" ] }, - "execution_count": 12, "metadata": {}, - "output_type": "execute_result" + "execution_count": 52 } - ], - "source": [ - "project.create_batch(\n", - " \"first-batch\", # Each batch in a project must have a unique name\n", - " datarow_ids, # A list of data rows or data row ids\n", - " 5 # priority between 1(Highest) - 5(lowest)\n", - ")" ] }, { "cell_type": "markdown", - "id": "8da94c48-72a5-4535-ab66-6d14b0b79aed", - "metadata": { - "id": "8da94c48-72a5-4535-ab66-6d14b0b79aed" - }, "source": [ - "### Create the predictions payload\n", - "We will upload it to the Model Run.\n", + "## Step 5. Create the predictions payload\n", "\n", + "Create the prediction payload using the snippets of code in ***Supported Predictions*** section. \n", "\n", - "It is recommended to use the Python SDK's annotation types when importing labels into Labelbox." - ] - }, - { - "cell_type": "markdown", - "id": "ee9b9191-6c0d-4cba-859b-e2e9a1b887c8", + "The resulting label_ndjson should have exactly the same content for predictions that are supported by both (with exception of the uuid strings that are generated)" + ], "metadata": { - "id": "ee9b9191-6c0d-4cba-859b-e2e9a1b887c8" - }, - "source": [ - "Object predictions" - ] + "id": "6FZyvnrqSGuc" + } }, { "cell_type": "code", - "execution_count": 13, - "id": "qzBqhV4Pv3yp", - "metadata": { - "id": "qzBqhV4Pv3yp" - }, - "outputs": [], "source": [ - "# Confidence scores are optional.\n", - "# If no confidence is provided, \n", - "# the prediction will be treated as if the confidence score equals 1\n", - "\n", - "point_prediction=ObjectAnnotation(\n", - " value=Point(x=850,y=150), # Coordinates for this point annotation\n", - " name=\"point\", # Name of the tool in your ontology\n", - " confidence=0.5\n", - ")\n", - "\n", - "box_prediction=ObjectAnnotation(\n", - " value=Rectangle( # Coordinates for the top-left and bottom-right points of your bounding box, respectively\n", - " start=Point(x=537,y=878),\n", 
- " end=Point(x=832,y=1120)\n", - " ),\n", - " name=\"box\", # Name of the tool in your ontology\n", - " confidence=0.5\n", - ")\n", - "\n", - "polyline_prediction=ObjectAnnotation(\n", - " value=Line( # Coordinates for the keypoints in your polyline\n", - " points=[Point(x=2514.353, y=229.471),Point(x=2409.492, y=152.092),Point(x=2254.322, y=201.962),Point(x=2204.491, y=140.463),Point(x=2116.123, y=254.716),\n", - " Point(x=1752.247, y=133.949),Point(x=1753.838, y=34.438),Point(x=1539.772, y=32.61),Point(x=1543.442, y=107.552),\n", - " Point(x=1438.869, y=124.903),Point(x=1408.941, y=308.149),Point(x=1143.128, y=370.815),Point(x=822.067, y=219.007),\n", - " Point(x=782.367, y=319.216),Point(x=620.273, y=314.408),Point(x=573.114, y=238.16),Point(x=327.559, y=218.251),\n", - " Point(x=318.087, y=125.064),Point(x=226.557, y=117.286),Point(x=216.648, y=235.61),Point(x=40.929, y=306.412)]\n", - " ),\n", - " name=\"line\", # Name of the tool in your ontology\n", - " confidence=0.5\n", + "# Create a Label for predictions\n", + "label_prediction = Label(\n", + " data=ImageData(uid=data_row.uid),\n", + " annotations = [\n", + " radio_prediction,\n", + " checklist_prediction, \n", + " bbox_prediction, \n", + " bbox_with_radio_subclass_prediction, \n", + " polygon_prediction, \n", + " mask_prediction, \n", + " point_prediction,\n", + " text_annotation\n", + " ]\n", ")\n", "\n", - "polygon_prediction=ObjectAnnotation(\n", - " value=Polygon( # Coordinates for the verticies of your polygon\n", - " points=[Point(x=1389.581,y=183.934),Point(x=2178.306,y=256.885),Point(x=2328.197,y=200.437),Point(x=2460.0,y=335.419),\n", - " Point(x=2457.386,y=503.165),Point(x=2220.596,y=503.103),Point(x=2056.083, y=628.943),Point(x=2061.111,y=785.519),\n", - " Point(x=1902.115, y=894.647),Point(x=1738.456,y=877.874),Point(x=1336.53,y=874.636),Point(x=1311.403,y=758.579),\n", - " Point(x=1253.853,y=751.74),Point(x=1245.264, y=453.461),Point(x=1326.011,y=421.129)]\n", - " ),\n", - " 
name=\"polygon\", # Name of the tool in your ontology\n", - " confidence=0.5\n", - ")\n", + "# Create a label list \n", + "label_list_prediction = [label_prediction]\n", "\n", - "mask_prediction=ObjectAnnotation(\n", - " value=Mask( # Numpy array representation of a segmentation mask with the corresponding values within the array that represent the segmentation mask\n", - " mask=MaskData(\n", - " arr=np.zeros([300,350,3],dtype='uint8') # Creating an example numpy array to represent a mask creating a square from pixels 0,0 to 128,128\n", - " ),\n", - " color=(0,0,0) # Identifying what values in the numpy array correspond to the mask annotation (since I made a 3-D array with all zeroes, here it's 0,0,0)\n", - " ),\n", - " name=\"mask\", # Name of the tool in your ontology\n", - " confidence=0.5\n", - ")" - ] + "# Convert the prediction label from a Labelbox class object to the underlying NDJSON format required for upload - uploads can be directly built in this syntax as well\n", + "ndjson_prediction = list(NDJsonConverter.serialize(label_list_prediction))" + ], + "metadata": { + "id": "zv2OLTXKSGWv" + }, + "execution_count": 53, + "outputs": [] }, { "cell_type": "markdown", - "id": "291f9c97-37ba-42f5-b8f0-e118bdc5c848", - "metadata": { - "id": "291f9c97-37ba-42f5-b8f0-e118bdc5c848" - }, "source": [ - "Classification predictions" - ] + "If using NDJSON" + ], + "metadata": { + "id": "HaIjOzZggv56" + } }, { "cell_type": "code", - "execution_count": 14, - "id": "f2RtQQPCymOB", - "metadata": { - "id": "f2RtQQPCymOB" - }, - "outputs": [], "source": [ - "# Confidence scores are optional.\n", - "# If no confidence is provided, \n", - "# the prediction will be treated as if the confidence score equals 1\n", - "\n", - "checklist_prediction=ClassificationAnnotation(\n", - " value=Checklist(\n", - " answer=[ # List of the checklist answers in your ontology\n", - " ClassificationAnswer(\n", - " name=\"first_checklist_answer\",\n", - " confidence=0.5\n", - " ),\n", - " 
ClassificationAnswer(\n", - " name=\"second_checklist_answer\",\n", - " confidence=0.5\n", - " )\n", - " ]\n", - " ), \n", - " name=\"checklist\" # Name of the classification in your ontology\n", - ")\n", - "\n", - "radio_prediction=ClassificationAnnotation(\n", - " value=Radio(\n", - " answer=ClassificationAnswer(\n", - " name=\"first_radio_answer\", # Name of the radio answer in your ontology\n", - " confidence=0.5\n", - " )\n", - " ), \n", - " name=\"radio\" # Name of the classification in your ontology\n", - ")\n", "\n", - "# Confidence is not supported for text prediction\n", - "text_prediction=ClassificationAnnotation(\n", - " value=Text( # String value for the text annotation\n", - " answer=\"the answer to the text question\",\n", - " ), \n", - " name=\"text\" # Name of the classification in your ontology\n", - ")\n" - ] + "ndjson_prediction_method2 = []\n", + "for annot in [\n", + " radio_prediction_ndjson,\n", + " checklist_prediction_ndjson, \n", + " bbox_prediction_ndjson, \n", + " bbox_with_radio_subclass_prediction_ndjson, \n", + " polygon_prediction_ndjson, \n", + " mask_prediction_ndjson, \n", + " point_prediction_ndjson,\n", + " polyline_prediction_ndjson,\n", + " text_annotation_ndjson, \n", + " nested_radio_prediction_ndjson,\n", + " nested_checklist_prediction_ndjson\n", + " \n", + " \n", + "]:\n", + " annot.update({\n", + " 'uuid': str(uuid.uuid4()),\n", + " 'dataRow': {'id': data_row.uid},\n", + " })\n", + " ndjson_prediction_method2.append(annot)" + ], + "metadata": { + "id": "F-Y7sSyAV3tn" + }, + "execution_count": 60, + "outputs": [] }, { "cell_type": "markdown", - "id": "15bd593b-509d-4114-af95-ae0be081c42d", - "metadata": { - "id": "15bd593b-509d-4114-af95-ae0be081c42d" - }, "source": [ - "Create a Label object with all of the predictions created previously." - ] + "## Step 6. 
Upload the predictions payload to the Model Run " + ], + "metadata": { + "id": "viFHCnBeTD1Y" + } }, { "cell_type": "code", - "execution_count": 15, - "id": "6d72fe25-ff7e-4e0a-94cf-095e4df73da0", + "source": [ + "# Upload the prediction label to the Model Run\n", + "upload_job_prediction = model_run.add_predictions(\n", + " name=\"prediction_upload_job\"+str(uuid.uuid4()),\n", + " predictions=ndjson_prediction_method2)\n", + "\n", + "# Errors will appear for prediction uploads that failed.\n", + "print(\"Errors:\", upload_job_prediction.errors)" + ], "metadata": { + "id": "0VN3ZRzyb4cl", "colab": { "base_uri": "https://localhost:8080/" }, - "id": "6d72fe25-ff7e-4e0a-94cf-095e4df73da0", - "outputId": "407eef2f-7931-45fe-f4f6-a098cf1a1dc6" + "outputId": "c6ab8349-c327-49df-e3f6-00e06370c7e3" }, + "execution_count": 61, "outputs": [ { - "data": { - "text/plain": [ - "{'uid': None,\n", - " 'data': ImageData(im_bytes=None,file_path=None,url=None,arr=None),\n", - " 'annotations': [ObjectAnnotation(confidence=0.5, name='point', feature_schema_id=None, extra={}, value=Point(extra={}, x=850.0, y=150.0), classifications=[]),\n", - " ObjectAnnotation(confidence=0.5, name='box', feature_schema_id=None, extra={}, value=Rectangle(extra={}, start=Point(extra={}, x=537.0, y=878.0), end=Point(extra={}, x=832.0, y=1120.0)), classifications=[]),\n", - " ClassificationAnnotation(name='text', feature_schema_id=None, extra={}, value=Text(answer='the answer to the text question')),\n", - " ClassificationAnnotation(name='checklist', feature_schema_id=None, extra={}, value=Checklist(name='checklist', answer=[ClassificationAnswer(confidence=0.5, name='first_checklist_answer', feature_schema_id=None, extra={}, keyframe=None), ClassificationAnswer(confidence=0.5, name='second_checklist_answer', feature_schema_id=None, extra={}, keyframe=None)])),\n", - " ClassificationAnnotation(name='radio', feature_schema_id=None, extra={}, value=Radio(answer=ClassificationAnswer(confidence=0.5, 
name='first_radio_answer', feature_schema_id=None, extra={}, keyframe=None))),\n", - " ObjectAnnotation(confidence=0.5, name='polygon', feature_schema_id=None, extra={}, value=Polygon(extra={}, points=[Point(extra={}, x=1389.581, y=183.934), Point(extra={}, x=2178.306, y=256.885), Point(extra={}, x=2328.197, y=200.437), Point(extra={}, x=2460.0, y=335.419), Point(extra={}, x=2457.386, y=503.165), Point(extra={}, x=2220.596, y=503.103), Point(extra={}, x=2056.083, y=628.943), Point(extra={}, x=2061.111, y=785.519), Point(extra={}, x=1902.115, y=894.647), Point(extra={}, x=1738.456, y=877.874), Point(extra={}, x=1336.53, y=874.636), Point(extra={}, x=1311.403, y=758.579), Point(extra={}, x=1253.853, y=751.74), Point(extra={}, x=1245.264, y=453.461), Point(extra={}, x=1326.011, y=421.129), Point(extra={}, x=1389.581, y=183.934)]), classifications=[]),\n", - " ObjectAnnotation(confidence=0.5, name='mask', feature_schema_id=None, extra={}, value=Mask(extra={}, mask=MaskData(im_bytes=None,file_path=None,url=https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2Fd2543c9e-656c-b2ba-e9ff-602dff29148c-1?Expires=1671755760855&KeyName=labelbox-assets-key-3&Signature=DmHaF41rIGM-fL9TxixxX3k3ynU,arr=...), color=(0, 0, 0)), classifications=[]),\n", - " ObjectAnnotation(confidence=None, name='line', feature_schema_id=None, extra={}, value=Line(extra={}, points=[Point(extra={}, x=2514.353, y=229.471), Point(extra={}, x=2409.492, y=152.092), Point(extra={}, x=2254.322, y=201.962), Point(extra={}, x=2204.491, y=140.463), Point(extra={}, x=2116.123, y=254.716), Point(extra={}, x=1752.247, y=133.949), Point(extra={}, x=1753.838, y=34.438), Point(extra={}, x=1539.772, y=32.61), Point(extra={}, x=1543.442, y=107.552), Point(extra={}, x=1438.869, y=124.903), Point(extra={}, x=1408.941, y=308.149), Point(extra={}, x=1143.128, y=370.815), Point(extra={}, x=822.067, y=219.007), Point(extra={}, x=782.367, y=319.216), Point(extra={}, x=620.273, y=314.408), Point(extra={}, x=573.114, 
y=238.16), Point(extra={}, x=327.559, y=218.251), Point(extra={}, x=318.087, y=125.064), Point(extra={}, x=226.557, y=117.286), Point(extra={}, x=216.648, y=235.61), Point(extra={}, x=40.929, y=306.412)]), classifications=[])],\n", - " 'extra': {}}" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" + "output_type": "stream", + "name": "stdout", + "text": [ + "Errors: []\n" + ] } - ], - "source": [ - "# Create a Label object by identifying the applicavle data row in Labelbox and providing a list of annotations\n", - "label_prediction = Label(\n", - " data=ImageData(\n", - " uid=data_row.uid),\n", - " annotations = [\n", - " point_prediction, box_prediction, text_prediction, \n", - " checklist_prediction, radio_prediction,\n", - " polygon_prediction, mask_prediction, polyline_prediction, \n", - "\n", - " ]\n", - ")\n", - "\n", - "# Create urls to mask data for upload\n", - "def signing_function(obj_bytes: bytes) -> str:\n", - " url = client.upload_data(content=obj_bytes, sign=True)\n", - " return url\n", - "\n", - "label_prediction.add_url_to_masks(signing_function)\n", - "\n", - "label_prediction.__dict__" ] }, { "cell_type": "markdown", - "id": "gAva__YCCzjL", - "metadata": { - "id": "gAva__YCCzjL" - }, "source": [ - "### Create the annotations payload\n", - "We will upload it to the project.\n", - "\n", - "It is recommended to use the Python SDK's annotation types when importing labels into Labelbox." - ] + "## Step 7: Send annotations to a model run\n", + "To visualize both annotations and predictions in the model run we will create a project with ground truth annotations. \n", + "To send annotations to a Model Run, we must first import them into a project, create a label payload and then send them to the Model Run." + ], + "metadata": { + "id": "T-ZHWWI3JgmX" + } }, { "cell_type": "markdown", - "id": "wbhzltpNCzjL", - "metadata": { - "id": "wbhzltpNCzjL" - }, "source": [ - "Object annotations" - ] + "##### 7.1. 
Create a labelbox project" + ], + "metadata": { + "id": "CYRiqHr2O_aL" + } }, { "cell_type": "code", - "execution_count": 16, - "id": "apatq9StCzjM", + "source": [ + "# Create a Labelbox project\n", + "project = client.create_project(name=\"image_prediction_demo\", \n", + " queue_mode=QueueMode.Batch,\n", + " # Quality Settings setup \n", + " auto_audit_percentage=1,\n", + " auto_audit_number_of_labels=1,\n", + " media_type=MediaType.Image)\n", + "project.setup_editor(ontology)" + ], "metadata": { - "id": "apatq9StCzjM" + "id": "jEtoDiDrPFvI" }, - "outputs": [], - "source": [ - "point_annotation=ObjectAnnotation(\n", - " value=Point(x=882,y=159), # Coordinates for this point annotation\n", - " name=\"point\" # Name of the tool in your ontology\n", - ")\n", - "\n", - "box_annotation=ObjectAnnotation(\n", - " value=Rectangle( # Coordinates for the top-left and bottom-right points of your bounding box, respectively\n", - " start=Point(x=557,y=898),\n", - " end=Point(x=852,y=1140)\n", - " ),\n", - " name=\"box\" # Name of the tool in your ontology\n", - ")\n", - "\n", - "polyline_annotation=ObjectAnnotation(\n", - " value=Line( # Coordinates for the keypoints in your polyline\n", - " points=[Point(x=2534.353, y=249.471),Point(x=2429.492, y=182.092),Point(x=2294.322, y=221.962),Point(x=2224.491, y=180.463),Point(x=2136.123, y=204.716),\n", - " Point(x=1712.247, y=173.949),Point(x=1703.838, y=84.438),Point(x=1579.772, y=82.61),Point(x=1583.442, y=167.552),\n", - " Point(x=1478.869, y=164.903),Point(x=1418.941, y=318.149),Point(x=1243.128, y=400.815),Point(x=1022.067, y=319.007),\n", - " Point(x=892.367, y=379.216),Point(x=670.273, y=364.408),Point(x=613.114, y=288.16),Point(x=377.559, y=238.251),\n", - " Point(x=368.087, y=185.064),Point(x=246.557, y=167.286),Point(x=236.648, y=285.61),Point(x=90.929, y=326.412)]\n", - " ),\n", - " name=\"line\" # Name of the tool in your ontology\n", - ")\n", - "\n", - "polygon_annotation=ObjectAnnotation(\n", - " value=Polygon( # 
Coordinates for the verticies of your polygon\n", - " points=[Point(x=1489.581,y=183.934),Point(x=2278.306,y=256.885),Point(x=2428.197,y=200.437),Point(x=2560.0,y=335.419),\n", - " Point(x=2557.386,y=503.165),Point(x=2320.596,y=503.103),Point(x=2156.083, y=628.943),Point(x=2161.111,y=785.519),\n", - " Point(x=2002.115, y=894.647),Point(x=1838.456,y=877.874),Point(x=1436.53,y=874.636),Point(x=1411.403,y=758.579),\n", - " Point(x=1353.853,y=751.74),Point(x=1345.264, y=453.461),Point(x=1426.011,y=421.129)]\n", - " ),\n", - " name=\"polygon\" # Name of the tool in your ontology\n", - ")\n", - "\n", - "mask_annotation=ObjectAnnotation(\n", - " value=Mask( # Numpy array representation of a segmentation mask with the corresponding values within the array that represent the segmentation mask\n", - " mask=MaskData(\n", - " arr=np.zeros([400,450,3],dtype='uint8') # Creating an example numpy array to represent a mask creating a square from pixels 0,0 to 128,128\n", - " ),\n", - " color=(0,0,0) # Identifying what values in the numpy array correspond to the mask annotation (since I made a 3-D array with all zeroes, here it's 0,0,0)\n", - " ),\n", - " name=\"mask\" # Name of the tool in your ontology\n", - ")" - ] + "execution_count": 62, + "outputs": [] }, { "cell_type": "markdown", - "id": "aqSYAaBiCzjN", - "metadata": { - "id": "aqSYAaBiCzjN" - }, "source": [ - "Classification annotations" - ] + "##### 7.2. 
Create a batch to send to the project " + ], + "metadata": { + "id": "7FEyC-nBPPuD" + } }, { "cell_type": "code", - "execution_count": 17, - "id": "9NAdIp6OCzjN", - "metadata": { - "id": "9NAdIp6OCzjN" - }, - "outputs": [], "source": [ - "text_annotation=ClassificationAnnotation(\n", - " value=Text( # String value for the text annotation\n", - " answer=\"the answer to the text question\" \n", - " ), \n", - " name=\"text\" # Name of the classification in your ontology\n", - ")\n", - "\n", - "checklist_annotation=ClassificationAnnotation(\n", - " value=Checklist(\n", - " answer=[ # List of the checklist answers in your ontology\n", - " ClassificationAnswer(name=\"first_checklist_answer\"),\n", - " ClassificationAnswer(name=\"second_checklist_answer\")\n", - " ]\n", - " ), \n", - " name=\"checklist\" # Name of the classification in your ontology\n", - ")\n", - "\n", - "radio_annotation=ClassificationAnnotation(\n", - " value=Radio(\n", - " answer=ClassificationAnswer(\n", - " name=\"second_radio_answer\" # Name of the radio answer in your ontology\n", - " )\n", - " ), \n", - " name=\"radio\" # Name of the classification in your ontology\n", + "project.create_batch(\n", + " \"batch_predictions_demo\", # Each batch in a project must have a unique name\n", + " dataset.export_data_rows(), # A list of data rows or data row ids\n", + " 5 # priority between 1(Highest) - 5(lowest)\n", ")" - ] - }, - { - "cell_type": "markdown", - "id": "zMcYJxpMCzjN", - "metadata": { - "id": "zMcYJxpMCzjN" - }, - "source": [ - "Create a Label object with all of the annotations created previously." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "OHofSRa1CzjN", + ], "metadata": { + "id": "WRr5tdVEPXXy", "colab": { "base_uri": "https://localhost:8080/" }, - "id": "OHofSRa1CzjN", - "outputId": "c5d0c340-96b9-4200-f7d0-646fab1f7c1b" + "outputId": "262e1be8-3e43-42dd-ac99-e47378f9a705" }, + "execution_count": 63, "outputs": [ { + "output_type": "execute_result", "data": { "text/plain": [ - "{'uid': None,\n", - " 'data': ImageData(im_bytes=None,file_path=None,url=None,arr=None),\n", - " 'annotations': [ObjectAnnotation(confidence=None, name='point', feature_schema_id=None, extra={}, value=Point(extra={}, x=882.0, y=159.0), classifications=[]),\n", - " ObjectAnnotation(confidence=None, name='box', feature_schema_id=None, extra={}, value=Rectangle(extra={}, start=Point(extra={}, x=557.0, y=898.0), end=Point(extra={}, x=852.0, y=1140.0)), classifications=[]),\n", - " ObjectAnnotation(confidence=None, name='line', feature_schema_id=None, extra={}, value=Line(extra={}, points=[Point(extra={}, x=2534.353, y=249.471), Point(extra={}, x=2429.492, y=182.092), Point(extra={}, x=2294.322, y=221.962), Point(extra={}, x=2224.491, y=180.463), Point(extra={}, x=2136.123, y=204.716), Point(extra={}, x=1712.247, y=173.949), Point(extra={}, x=1703.838, y=84.438), Point(extra={}, x=1579.772, y=82.61), Point(extra={}, x=1583.442, y=167.552), Point(extra={}, x=1478.869, y=164.903), Point(extra={}, x=1418.941, y=318.149), Point(extra={}, x=1243.128, y=400.815), Point(extra={}, x=1022.067, y=319.007), Point(extra={}, x=892.367, y=379.216), Point(extra={}, x=670.273, y=364.408), Point(extra={}, x=613.114, y=288.16), Point(extra={}, x=377.559, y=238.251), Point(extra={}, x=368.087, y=185.064), Point(extra={}, x=246.557, y=167.286), Point(extra={}, x=236.648, y=285.61), Point(extra={}, x=90.929, y=326.412)]), classifications=[]),\n", - " ObjectAnnotation(confidence=None, name='polygon', feature_schema_id=None, extra={}, value=Polygon(extra={}, points=[Point(extra={}, 
x=1489.581, y=183.934), Point(extra={}, x=2278.306, y=256.885), Point(extra={}, x=2428.197, y=200.437), Point(extra={}, x=2560.0, y=335.419), Point(extra={}, x=2557.386, y=503.165), Point(extra={}, x=2320.596, y=503.103), Point(extra={}, x=2156.083, y=628.943), Point(extra={}, x=2161.111, y=785.519), Point(extra={}, x=2002.115, y=894.647), Point(extra={}, x=1838.456, y=877.874), Point(extra={}, x=1436.53, y=874.636), Point(extra={}, x=1411.403, y=758.579), Point(extra={}, x=1353.853, y=751.74), Point(extra={}, x=1345.264, y=453.461), Point(extra={}, x=1426.011, y=421.129), Point(extra={}, x=1489.581, y=183.934)]), classifications=[]),\n", - " ObjectAnnotation(confidence=None, name='mask', feature_schema_id=None, extra={}, value=Mask(extra={}, mask=MaskData(im_bytes=None,file_path=None,url=https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F54fa3928-8aac-c97e-b0ad-353e0ca84e51-1?Expires=1671755761195&KeyName=labelbox-assets-key-3&Signature=zf-AAyerLdXdpYOTq-djtDyBWDk,arr=...), color=(0, 0, 0)), classifications=[]),\n", - " ClassificationAnnotation(name='text', feature_schema_id=None, extra={}, value=Text(answer='the answer to the text question')),\n", - " ClassificationAnnotation(name='checklist', feature_schema_id=None, extra={}, value=Checklist(name='checklist', answer=[ClassificationAnswer(confidence=None, name='first_checklist_answer', feature_schema_id=None, extra={}, keyframe=None), ClassificationAnswer(confidence=None, name='second_checklist_answer', feature_schema_id=None, extra={}, keyframe=None)])),\n", - " ClassificationAnnotation(name='radio', feature_schema_id=None, extra={}, value=Radio(answer=ClassificationAnswer(confidence=None, name='second_radio_answer', feature_schema_id=None, extra={}, keyframe=None)))],\n", - " 'extra': {}}" + "" ] }, - "execution_count": 18, "metadata": {}, - "output_type": "execute_result" + "execution_count": 63 } - ], - "source": [ - "# Create a Label object by identifying the applicavle data row in Labelbox and 
providing a list of annotations\n", - "label_annotation = Label(\n", - " data=ImageData(\n", - " uid=data_row.uid),\n", - " annotations = [\n", - " point_annotation, box_annotation, polyline_annotation, polygon_annotation, mask_annotation,\n", - " text_annotation, checklist_annotation, radio_annotation\n", - " ]\n", - ")\n", - "\n", - "# Create urls to mask data for upload\n", - "def signing_function(obj_bytes: bytes) -> str:\n", - " url = client.upload_data(content=obj_bytes, sign=True)\n", - " return url\n", - "\n", - "label_annotation.add_url_to_masks(signing_function)\n", - "\n", - "label_annotation.__dict__" ] }, { "cell_type": "markdown", - "id": "KIEvdVzTGL09", - "metadata": { - "id": "KIEvdVzTGL09" - }, "source": [ - "### Upload the annotations payload to the project" - ] + "##### 7.3 Create the annotations payload" + ], + "metadata": { + "id": "FTGAI730UlZ3" + } }, { "cell_type": "code", - "execution_count": 19, - "id": "d_95gQMAGJXq", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "d_95gQMAGJXq", - "outputId": "cab6f910-4bc7-452b-f5b5-ee0eb6f54091" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: []\n" - ] - } - ], "source": [ - "## Create a label list \n", - "label_list_annotation = [label_annotation]\n", + "########### Annotations ###########\n", + "radio_annotation_ndjson = {\n", + " 'name': 'radio_question',\n", + " 'answer': {'name': 'second_radio_answer'}\n", + "} \n", "\n", - "# Convert the annotation label from a Labelbox class object to the underlying NDJSON format required for upload - uploads can be directly built in this syntax as well\n", - "ndjson_annotation = list(NDJsonConverter.serialize(label_list_annotation))\n", + "nested_radio_annotation_ndjson = {\n", + " \"name\": \"nested_radio_question\",\n", + " \"answer\": {\"name\": \"first_radio_answer\"},\n", + " \"classifications\" : [\n", + " {'name': 'sub_radio_question', 'answer': {'name': 
'first_sub_radio_answer'}}\n", + " ]\n", + "}\n", "\n", - "# Upload the annotation label to the project using Label Import\n", - "upload_job_annotation = LabelImport.create_from_objects(\n", - " client = client,\n", - " project_id = project.uid,\n", - " name=\"annotation_import_job\",\n", - " labels=ndjson_annotation)\n", + "checklist_annotation_ndjson = {\n", + " 'name': 'checklist_question',\n", + " 'answer': [\n", + " {'name': 'first_checklist_answer'},\n", + " {'name': 'second_checklist_answer'}\n", + " ]\n", + "}\n", "\n", - "# This will provide information only after the upload_job is complete, so we do not need to worry about having to rerun\n", - "upload_job_annotation.wait_until_done()\n", - "# Errors will appear for annotation uploads that failed.\n", - "print(\"Errors:\", upload_job_annotation.errors)" - ] + "bbox_annotation_ndjson = {\n", + " 'name': 'bounding_box',\n", + " 'bbox': {\n", + " \"top\": 977,\n", + " \"left\": 1690,\n", + " \"height\": 330,\n", + " \"width\": 225\n", + " }\n", + "}\n", + "\n", + "bbox_with_radio_subclass_ndjson = {\n", + " \"name\": \"bbox_with_radio_subclass\", \n", + " \"classifications\": [{\n", + " \"name\": \"sub_radio_question\",\n", + " \"answer\": \n", + " { \"name\":\"first_sub_radio_answer\" }\n", + " \n", + " }],\n", + " \"bbox\": {\n", + " \"top\": 933,\n", + " \"left\": 541,\n", + " \"height\": 191,\n", + " \"width\": 330\n", + " }\n", + "}\n", + "\n", + "polygon_annotation_ndjson = {\n", + " 'name': 'polygon',\n", + " 'polygon': [\n", + " {'x': 1489.581, 'y': 183.934},\n", + " {'x': 2278.306, 'y': 256.885},\n", + " {'x': 2428.197, 'y': 200.437},\n", + " {'x': 2560.0, 'y': 335.419},\n", + " {'x': 2557.386, 'y': 503.165},\n", + " {'x': 2320.596, 'y': 503.103},\n", + " {'x': 2156.083, 'y': 628.943},\n", + " {'x': 2161.111, 'y': 785.519},\n", + " {'x': 2002.115, 'y': 894.647},\n", + " {'x': 1838.456, 'y': 877.874},\n", + " {'x': 1436.53, 'y': 874.636},\n", + " {'x': 1411.403, 'y': 758.579},\n", + " {'x': 1353.853, 
'y': 751.74},\n", + " {'x': 1345.264, 'y': 453.461},\n", + " {'x': 1426.011, 'y': 421.129},\n", + " {'x': 1489.581, 'y': 183.934}\n", + " ]\n", + "}\n", + "\n", + "mask_annotation_ndjson = {\n", + " 'name': 'mask',\n", + " 'classifications': [],\n", + " 'mask': {'instanceURI': 'https://storage.labelbox.com/cjhfn5y6s0pk507024nz1ocys%2F1d60856c-59b7-3060-2754-83f7e93e0d01-1?Expires=1666901963361&KeyName=labelbox-assets-key-3&Signature=t-2s2DB4YjFuWEFak0wxYqfBfZA',\n", + " 'colorRGB': (0, 0, 0)}\n", + "}\n", + "\n", + "\n", + "point_annotation_ndjson = {\n", + " 'name': 'point',\n", + " 'classifications': [],\n", + " 'point': {'x': 1166.606, 'y': 1441.768}\n", + "}\n", + "\n", + "polyline_annotation_ndjson = {\n", + " 'name': 'polyline',\n", + " 'classifications': [],\n", + " 'line': [\n", + " {'x': 2534.353, 'y': 249.471},\n", + " {'x': 2429.492, 'y': 182.092},\n", + " {'x': 2294.322, 'y': 221.962},\n", + " {'x': 2224.491, 'y': 180.463},\n", + " {'x': 2136.123, 'y': 204.716},\n", + " {'x': 1712.247, 'y': 173.949},\n", + " {'x': 1703.838, 'y': 84.438},\n", + " {'x': 1579.772, 'y': 82.61},\n", + " {'x': 1583.442, 'y': 167.552},\n", + " {'x': 1478.869, 'y': 164.903},\n", + " {'x': 1418.941, 'y': 318.149},\n", + " {'x': 1243.128, 'y': 400.815},\n", + " {'x': 1022.067, 'y': 319.007},\n", + " {'x': 892.367, 'y': 379.216},\n", + " {'x': 670.273, 'y': 364.408},\n", + " {'x': 613.114, 'y': 288.16},\n", + " {'x': 377.559, 'y': 238.251},\n", + " {'x': 368.087, 'y': 185.064},\n", + " {'x': 246.557, 'y': 167.286},\n", + " {'x': 236.648, 'y': 285.61},\n", + " {'x': 90.929, 'y': 326.412}\n", + " ]\n", + "}\n", + "\n", + "nested_checklist_annotation_ndjson = {\n", + " \"name\": \"nested_checklist_question\",\n", + " \"answer\": [{\n", + " \"name\": \"first_checklist_answer\", \n", + " \"classifications\" : [\n", + " {\n", + " 
\"name\": \"sub_checklist_question\", \n", + " \"answer\": {\"name\": \"first_sub_checklist_answer\"}\n", + " } \n", + " ] \n", + " }]\n", + "}\n", + "\n", + "text_annotation_ndjson = {\n", + " 'name': 'free_text',\n", + " 'answer': 'sample text',\n", + "}\n" + ], + "metadata": { + "id": "A8_HVvu9Uvfl" + }, + "execution_count": 64, + "outputs": [] }, { "cell_type": "markdown", - "id": "tcQpab5_GR72", - "metadata": { - "id": "tcQpab5_GR72" - }, "source": [ - "### Send the annotations to the Model Run" - ] + "##### 7.4. Create the label object" + ], + "metadata": { + "id": "8QwmguFvPltl" + } }, { - "cell_type": "markdown", - "id": "5I45AW4OHJvq", + "cell_type": "code", + "source": [ + "# Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations\n", + "ndjson_annotation = []\n", + "for annot in [\n", + " radio_annotation_ndjson, \n", + " checklist_annotation_ndjson, \n", + " bbox_annotation_ndjson, \n", + " bbox_with_radio_subclass_ndjson, \n", + " polygon_annotation_ndjson, \n", + " mask_annotation_ndjson, \n", + " point_annotation_ndjson, \n", + " polyline_annotation_ndjson,\n", + " nested_radio_annotation_ndjson,\n", + " nested_checklist_annotation_ndjson,\n", + " text_annotation_ndjson\n", + "]:\n", + " annot.update({\n", + " 'uuid': str(uuid.uuid4()),\n", + " 'dataRow': {'id': data_row.uid},\n", + " })\n", + " ndjson_annotation.append(annot) \n", + "\n" + ], "metadata": { - "id": "5I45AW4OHJvq" + "id": "9gD_alThQA3G" }, + "execution_count": 65, + "outputs": [] + }, + { + "cell_type": "markdown", "source": [ - "Get the label IDs that we just uploaded\n" - ] + "##### 7.5. 
Upload annotations to the project using Label Import" + ], + "metadata": { + "id": "nGVNQlvPQ-kF" + } }, { "cell_type": "code", - "execution_count": 20, - "id": "wGwk8s0SHiIg", + "source": [ + "upload_job_annotation = LabelImport.create_from_objects(\n", + " client = client,\n", + " project_id = project.uid,\n", + " name=\"annotation_import_\" + str(uuid.uuid4()),\n", + " labels=ndjson_annotation)\n", + "\n", + "upload_job_annotation.wait_until_done()\n", + "# Errors will appear for annotation uploads that failed.\n", + "print(\"Errors:\", upload_job_annotation.errors)\n" + ], "metadata": { + "id": "HYh9AzrlRYX-", "colab": { "base_uri": "https://localhost:8080/" }, - "id": "wGwk8s0SHiIg", - "outputId": "59d5ec00-6830-48f0-e5d0-655a35393ec8" + "outputId": "eba0209a-bcde-4816-b386-d69f97899678" }, + "execution_count": 66, "outputs": [ { - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ - "label_ids: ['clbycrz8s000l0g3ejhyi3w4g']\n" + "Errors: []\n" ] } - ], - "source": [ - "# get the labels id from the project\n", - "label_ids = [x['ID'] for x in project.export_labels(download=True)]\n", - "print(\"label_ids: \",label_ids)" ] }, + { + "cell_type": "markdown", + "source": [ + "##### 7.6 Send the annotations to the Model Run" + ], + "metadata": { + "id": "Y3rgM-5cRrxM" + } + }, { "cell_type": "code", - "execution_count": 21, - "id": "nZVuxM5yGR73", + "source": [ + "# get the labels id from the project\n", + "label_ids = [x['ID'] for x in project.export_labels(download=True)]\n", + "model_run.upsert_labels(label_ids)" + ], "metadata": { + "id": "i2BrS8CcSBzo", "colab": { "base_uri": "https://localhost:8080/" }, - "id": "nZVuxM5yGR73", - "outputId": "bc8e870e-5312-4b22-ef44-153548335894" + "outputId": "b2d68ab6-6d1f-4ce2-d633-8048e8209af3" }, + "execution_count": 67, "outputs": [ { + "output_type": "execute_result", "data": { "text/plain": [ "True" ] }, - "execution_count": 21, "metadata": {}, - "output_type": "execute_result" + 
"execution_count": 67 } - ], - "source": [ - "model_run.upsert_labels(label_ids)" ] }, { "cell_type": "markdown", - "id": "mFlJY439GSHl", - "metadata": { - "id": "mFlJY439GSHl" - }, "source": [ - "### Upload the predictions payload to the Model Run" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "HFgB6qaSGSHm", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "HFgB6qaSGSHm", - "outputId": "6c521aa5-9fb9-49b6-c0f7-4503f2070c7d" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Errors: []\n" - ] - } + "## Optional deletions for cleanup \n" ], - "source": [ - "## Create a label list \n", - "label_list_prediction = [label_prediction]\n", - "\n", - "# Convert the prediction label from a Labelbox class object to the underlying NDJSON format required for upload - uploads can be directly built in this syntax as well\n", - "ndjson_prediction = list(NDJsonConverter.serialize(label_list_prediction))\n", - "\n", - "# Upload the prediction label to the Model Run\n", - "upload_job_prediction = model_run.add_predictions(\n", - " name=\"prediction_upload_job\"+str(uuid.uuid4()),\n", - " predictions=ndjson_prediction)\n", - "\n", - "# Errors will appear for annotation uploads that failed.\n", - "print(\"Errors:\", upload_job_prediction.errors)\n" - ] - }, - { - "cell_type": "markdown", - "id": "OhgYk6byutP4", "metadata": { - "id": "OhgYk6byutP4" - }, - "source": [ - "## Cleanup " - ] + "id": "DMtOfWWDWFbJ" + } }, { "cell_type": "code", - "execution_count": 23, - "id": "_9FDSkrhur2q", - "metadata": { - "id": "_9FDSkrhur2q" - }, - "outputs": [], "source": [ - "# mal_project.delete()\n", - "# li_project.delete()\n", + "# project.delete()\n", "# dataset.delete()" - ] - } - ], - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3.9.2 64-bit", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": 
"ipython", - "version": 3 + ], + "metadata": { + "id": "aAhkyvJlWK1p" }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.2" - }, - "vscode": { - "interpreter": { - "hash": "397704579725e15f5c7cb49fe5f0341eb7531c82d19f2c29d197e8b64ab5776b" - } + "execution_count": 68, + "outputs": [] } - }, - "nbformat": 4, - "nbformat_minor": 5 + ] }